Dataset columns (string lengths, shortest to longest):
  hip_filename    string, 5 to 84
  hip_content     string, 79 to 9.69M
  cuda_filename   string, 4 to 83
  cuda_content    string, 19 to 9.69M
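Each row pairs a hipify-generated HIP source file (hip_filename, hip_content) with the CUDA source it was translated from (cuda_filename, cuda_content). As a rough, non-authoritative sketch of how the rows could be consumed, assuming they are published as a Hugging Face dataset (the dataset ID and split below are hypothetical placeholders, not taken from this preview):

    # Minimal sketch, assuming the Hugging Face `datasets` library is available;
    # "user/hip-cuda-pairs" and the "train" split are hypothetical placeholders.
    from datasets import load_dataset

    ds = load_dataset("user/hip-cuda-pairs", split="train")

    for row in ds.select(range(3)):
        # Column names come from the schema listed above.
        print(row["cuda_filename"], "->", row["hip_filename"])
        print(len(row["cuda_content"]), "chars of CUDA,",
              len(row["hip_content"]), "chars of HIP")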
1d77fe7fd2c9ec26103dd7c55234acc04452a628.hip
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

// Defining two constants
__constant__ int constant_f;
__constant__ int constant_g;

#define N 5

// Kernel function for using constant memory
__global__ void gpu_constant_memory(float *d_in, float *d_out) {
    // Thread index for current kernel
    int tid = threadIdx.x;
    d_out[tid] = constant_f*d_in[tid] + constant_g;
}

int main(void) {
    // Defining arrays for host
    float h_in[N], h_out[N];
    // Defining pointers for device
    float *d_in, *d_out;
    int h_f = 2;
    int h_g = 20;
    // Allocate the memory on the GPU
    hipMalloc((void**)&d_in, N * sizeof(float));
    hipMalloc((void**)&d_out, N * sizeof(float));
    // Initializing array
    for (int i = 0; i < N; i++) {
        h_in[i] = i;
    }
    // Copy array from host to device
    hipMemcpy(d_in, h_in, N * sizeof(float), hipMemcpyHostToDevice);
    // Copy constants to constant memory
    hipMemcpyToSymbol(constant_f, &h_f, sizeof(int), 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(constant_g, &h_g, sizeof(int));
    // Calling kernel with one block and N threads per block
    gpu_constant_memory << <1, N >> >(d_in, d_out);
    // Copying result back to host from device memory
    hipMemcpy(h_out, d_out, N * sizeof(float), hipMemcpyDeviceToHost);
    // Printing result on console
    printf("Use of Constant memory on GPU \n");
    for (int i = 0; i < N; i++) {
        printf("The expression for input %f is %f\n", h_in[i], h_out[i]);
    }
    // Free up memory
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
1d77fe7fd2c9ec26103dd7c55234acc04452a628.cu
#include "stdio.h" #include<iostream> #include <cuda.h> #include <cuda_runtime.h> //Defining two constants __constant__ int constant_f; __constant__ int constant_g; #define N 5 //Kernel function for using constant memory __global__ void gpu_constant_memory(float *d_in, float *d_out) { //Thread index for current kernel int tid = threadIdx.x; d_out[tid] = constant_f*d_in[tid] + constant_g; } int main(void) { //Defining Arrays for host float h_in[N], h_out[N]; //Defining Pointers for device float *d_in, *d_out; int h_f = 2; int h_g = 20; // allocate the memory on the cpu cudaMalloc((void**)&d_in, N * sizeof(float)); cudaMalloc((void**)&d_out, N * sizeof(float)); //Initializing Array for (int i = 0; i < N; i++) { h_in[i] = i; } //Copy Array from host to device cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice); //Copy constants to constant memory cudaMemcpyToSymbol(constant_f, &h_f, sizeof(int),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(constant_g, &h_g, sizeof(int)); //Calling kernel with one block and N threads per block gpu_constant_memory << <1, N >> >(d_in, d_out); //Coping result back to host from device memory cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost); //Printing result on console printf("Use of Constant memory on GPU \n"); for (int i = 0; i < N; i++) { printf("The expression for input %f is %f\n", h_in[i], h_out[i]); } //Free up memory cudaFree(d_in); cudaFree(d_out); return 0; }
339a395e412a2f38aa5fe408b3bee4ce4e6fcd3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "StiffnessMatrixGPU.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } StiffnessMatrixGPU::StiffnessMatrixGPU(Material& mat, Geometry &geo, unsigned int n) : StiffnessMatrixFirstOrder(mat, geo, n) { int device = -1; hipGetDevice(&device); // copy from the material matarix hipMallocManaged(&D_d, 6*sizeof(float)); hipMemcpy(D_d, material->materialMatrix, 6*sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); Log::Logger().Info("StiffnessMatrixGPU created by CPU"); }; StiffnessMatrixGPU::~StiffnessMatrixGPU() { Log::Logger().Info("StiffnessMatrixGPU deleted by CPU"); hipFree(D_d); } __global__ void constantCreatorKernel(int n, float* c, float* x, float* y, unsigned int* mesh, StiffnessMatrixGPU *s) { //printf("in the function\n blockDim.x = %d, gridDim.x = %d, blockIdx.x = %d\n", blockDim.x,gridDim.x, blockIdx.x); int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { //printf("i is %d stride is %d threadID = %d\n",i,stride,threadIdx.x); s->constantCreator(i, c, x, y, mesh); } }; __global__ void StiffnessMatrixKernel(unsigned int n, unsigned int nip, float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int *j_index, unsigned int* rowPtr,StiffnessMatrixGPU *obj) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { obj->stiffnessMatrixCalculation(i, nip, in, ip, iw, c, D, mesh, k, i_index, j_index, rowPtr); } } Sparse& StiffnessMatrixGPU::GetStiffnessMatrix() { blockSize = 32; //numberOfElements=33; int numBlocks = (numberOfElements + blockSize-1)/blockSize; hipLaunchKernelGGL(( constantCreatorKernel), dim3(numBlocks), dim3(blockSize), 0, 0, numberOfElements, c, geometry->get_x(), geometry->get_y(), geometry->get_mesh(), this); hipDeviceSynchronize(); numBlocks = (simulationSize + blockSize-1)/blockSize; Timer timer("Time spend in GPU: "); hipLaunchKernelGGL(( StiffnessMatrixKernel), dim3(numBlocks), dim3(blockSize), 0, 0, numberOfElements, nipSquared, integrationNode, integrationPos, integrationWeight, c, D_d, geometry->get_mesh(), stiffMat->value, stiffMat->i, stiffMat->j , stiffMat->rowPtr, this); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); hipDeviceSynchronize(); return *stiffMat; }
339a395e412a2f38aa5fe408b3bee4ce4e6fcd3b.cu
#include "StiffnessMatrixGPU.h" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } StiffnessMatrixGPU::StiffnessMatrixGPU(Material& mat, Geometry &geo, unsigned int n) : StiffnessMatrixFirstOrder(mat, geo, n) { int device = -1; cudaGetDevice(&device); // copy from the material matarix cudaMallocManaged(&D_d, 6*sizeof(float)); cudaMemcpy(D_d, material->materialMatrix, 6*sizeof(float), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); Log::Logger().Info("StiffnessMatrixGPU created by CPU"); }; StiffnessMatrixGPU::~StiffnessMatrixGPU() { Log::Logger().Info("StiffnessMatrixGPU deleted by CPU"); cudaFree(D_d); } __global__ void constantCreatorKernel(int n, float* c, float* x, float* y, unsigned int* mesh, StiffnessMatrixGPU *s) { //printf("in the function\n blockDim.x = %d, gridDim.x = %d, blockIdx.x = %d\n", blockDim.x,gridDim.x, blockIdx.x); int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { //printf("i is %d stride is %d threadID = %d\n",i,stride,threadIdx.x); s->constantCreator(i, c, x, y, mesh); } }; __global__ void StiffnessMatrixKernel(unsigned int n, unsigned int nip, float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int *j_index, unsigned int* rowPtr,StiffnessMatrixGPU *obj) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { obj->stiffnessMatrixCalculation(i, nip, in, ip, iw, c, D, mesh, k, i_index, j_index, rowPtr); } } Sparse& StiffnessMatrixGPU::GetStiffnessMatrix() { blockSize = 32; //numberOfElements=33; int numBlocks = (numberOfElements + blockSize-1)/blockSize; constantCreatorKernel<<<numBlocks, blockSize>>>(numberOfElements, c, geometry->get_x(), geometry->get_y(), geometry->get_mesh(), this); cudaDeviceSynchronize(); numBlocks = (simulationSize + blockSize-1)/blockSize; Timer timer("Time spend in GPU: "); StiffnessMatrixKernel<<<numBlocks, blockSize>>>(numberOfElements, nipSquared, integrationNode, integrationPos, integrationWeight, c, D_d, geometry->get_mesh(), stiffMat->value, stiffMat->i, stiffMat->j , stiffMat->rowPtr, this); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); cudaDeviceSynchronize(); return *stiffMat; }
6ffbb1d89bf32c20f713b86d84c79b1223b1ccc2.hip
// !!! This is a file automatically generated by hipify!!! //#define DEPTH 2 #include <stdio.h> //#include <hip/hip_runtime.h> #include "sgbm_helper.cuh" __device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){ *d_zero = dp[ind]; if (d > 0) *d_one = dp[ind - depth_dim_size] + P_one; else *d_one = 10000000; if (d < D-1) *d_two = dp[ind + depth_dim_size] + P_one; else *d_two = 10000000; return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two; } __device__ float __hamming_dist(unsigned long long a, unsigned long long b){ unsigned long long c = a^b; float z = 0; while (c != 0){ z += c & 1; c>>=1; } return z; } __device__ float __hamming_dist_int(unsigned int a, unsigned int b){ unsigned int c = a^b; float z = 0; while (c != 0){ z += c & 1; c>>=1; } return z; } #define XOR__(a,b) a = a^b; #define SHIFT1__(a) a = a - ((a >> 1) & 0x55555555); #define SHIFT2__(a) a = (a & 0x33333333) + ((a >> 2) & 0x33333333); #define SHIFT3__(a) a = (a + (a >> 4)) & 0xF0F0F0F; #define SHIFT4__(a) a = (a * 0x01010101) >> 24; __device__ float __hamming_dist_int_fast(unsigned int a, unsigned int b){ a = a^b; a = a - ((a >> 1) & 0x55555555); a = (a & 0x33333333) + ((a >> 2) & 0x33333333); a = (a + (a >> 4)) & 0xF0F0F0F; a = (a * 0x01010101) >> 24; return a; } float * device_shift_subtract_stack_baseline(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); hipLaunchKernelGGL(( __shift_subtract_stack_baseline), dim3(grid), dim3(block), 0, 0, L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_base(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); hipLaunchKernelGGL(( __shift_subtract_stack_base), dim3(grid), dim3(block), 0, 0, L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_level1(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); hipLaunchKernelGGL(( __shift_subtract_stack_level1pt5), dim3(grid), dim3(block), 0, 0, L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_level2(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = ((rows / 8) + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); hipLaunchKernelGGL(( __shift_subtract_stack_level2), dim3(grid), dim3(block), 0, 0, L,R,out,rows,cols); return out; } __global__ void __shift_subtract_stack_base(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; for(int d = 0; d < D; d++) { if (j + d < cols) out[ind] = __hamming_dist_int(R[(ind % imsize) + d], L[ind % imsize]); else out[ind] = 1e7; ind += imsize; } } __global__ void __shift_subtract_stack_level1(unsigned int * L, unsigned int * R, float * out, int 
rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; for(int d = 0; d < D; d++) { if (j + d < cols) out[ind] = __hamming_dist_int_fast(R[(ind % imsize) + d], L[ind % imsize]); else out[ind] = 1e7; ind += imsize; } } __global__ void __shift_subtract_stack_level1pt5(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; for(int d = 0; d < D; d++) { if (j + d < cols) out[out_ind] = __hamming_dist_int_fast(R[ind + d], lval); else out[out_ind] = 1e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_level1pt7(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; __shared__ unsigned int shmem[32][32]; for(int d = 0; d < D; d++) { if (j + d < cols) shmem[threadIdx.y][threadIdx.x] = R[ind + d]; __syncthreads(); if (i + d < cols) shmem[threadIdx.x][threadIdx.y] = __hamming_dist_int_fast(lval, shmem[threadIdx.x][threadIdx.y]); __syncthreads(); if(j + d < cols) out[out_ind] = shmem[threadIdx.y][threadIdx.x]; else out[ind] = 10e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_baseline(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; for(int d = 0; d < D; d++) { if (j + d < cols) out[out_ind] = R[ind + d] - lval; else out[out_ind] = 1e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_level2(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int inc = gridDim.y * blockDim.y * cols; unsigned int b1 = L[ind]; unsigned int b2 = L[ind + inc]; unsigned int b3 = L[ind + 2 * inc]; unsigned int b4 = L[ind + 3 * inc]; unsigned int b5 = L[ind + 4 * inc]; unsigned int b6 = L[ind + 5 *inc]; unsigned int b7 = L[ind + 6 * inc]; unsigned int b8 = L[ind + 7 * inc]; for(int d = 0; d < D; d++) { if (j + d < cols) { unsigned int a1 = R[ind + d]; unsigned int a2 = R[ind + inc + d]; unsigned int a3 = R[ind + 2 * inc + d]; unsigned int a4 = R[ind + 3 * inc + d]; unsigned int a5 = R[ind + 4 * inc + d]; unsigned int a6 = R[ind + 5 * inc + d]; unsigned int a7 = R[ind + 6 * inc + d]; unsigned int a8 = R[ind + 7 * inc + d]; XOR__(a1,b1); XOR__(a2,b2); XOR__(a3,b3); XOR__(a4,b4); XOR__(a5,b5); XOR__(a6,b6); XOR__(a7,b7); XOR__(a8,b8); SHIFT1__(a1); SHIFT1__(a2); SHIFT1__(a3); SHIFT1__(a4); SHIFT1__(a5); SHIFT1__(a6); SHIFT1__(a7); SHIFT1__(a8); SHIFT2__(a1); SHIFT2__(a2); SHIFT2__(a3); SHIFT2__(a4); SHIFT2__(a5); SHIFT2__(a6); SHIFT2__(a7); SHIFT2__(a8); SHIFT3__(a1); SHIFT3__(a2); SHIFT3__(a3); SHIFT3__(a4); SHIFT3__(a5); SHIFT3__(a6); SHIFT3__(a7); SHIFT3__(a8); SHIFT4__(a1); SHIFT4__(a2); SHIFT4__(a3); SHIFT4__(a4); SHIFT4__(a5); SHIFT4__(a6); SHIFT4__(a7); SHIFT4__(a8); out[out_ind] = a1; out[out_ind + inc] = a2; out[out_ind + 2 * inc] = a3; out[out_ind + 3 * inc] = a4; out[out_ind + 4 * inc] = a5; 
out[out_ind + 5 * inc] = a6; out[out_ind + 6 * inc] = a7; out[out_ind + 7 * inc] = a8; } else { out[out_ind] = 1e7; out[out_ind + inc] = 1e7; out[out_ind + 2 * inc] = 1e7; out[out_ind + 3 * inc] = 1e7; out[out_ind + 4 * inc] = 1e7; out[out_ind + 5 * inc] = 1e7; out[out_ind + 6 * inc] = 1e7; out[out_ind + 7 * inc] = 1e7; } out_ind += imsize; } } // right aggregation __global__ void __r_aggregate(float *dp, float *cost_image, int m, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x; int depth_dim_size = m*n; __shared__ float MinArray[SHMEM_SIZE][SHMEM_SIZE]; int K = 0; // this variable keeps track of the progress in aggregating // across the columns of the image while ((col < n) & (row < m)) { int ind = row * n + col; float prev_min = 100000000.0; for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } MinArray[threadIdx.y][threadIdx.x] = prev_min; __syncthreads(); float d0 = 0; float d1 = 0; float d2 = 0; // when processing a video stream, need to make sure that processing of multiple // frames can overlap, since after this point only one warp of threads is executing // threads from only one warp will handle rightward aggregation across the // region that has been loaded into shared memory // for threads where threadIdx.y is 0, now threadIdx.x will index the rows if (threadIdx.y == 0) { int agg_row = threadIdx.x + blockIdx.y * blockDim.y; int start_K = K; int local_K = 0; if (agg_row < m) { for(; (K < (n - 1)) && (K < (start_K + SHMEM_SIZE)); K++) { float d3 = MinArray[threadIdx.x][local_K] + (float) P2; int ind = agg_row * n + K + 1; for (int d = 0; d < D; d+=D_STEP){ dp[ind] += cost_image[ind] + dp_criteria(dp, ind-1, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); } local_K++; } } } __syncthreads(); col+=blockDim.x; } } // left aggregation __global__ void __l_aggregate(float *dp, float *cost_image, int m, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = n - 1 - threadIdx.x; int depth_dim_size = m*n; __shared__ float MinArray[SHMEM_SIZE][SHMEM_SIZE]; int K = n-1; // this variable keeps track of the progress in aggregating // across the columns of the image while ((col >= 0) & (row < m)) { int ind = row * n + col; float prev_min = 100000000.0; for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } MinArray[threadIdx.y][SHMEM_SIZE - 1 - threadIdx.x] = prev_min; __syncthreads(); float d0 = 0; float d1 = 0; float d2 = 0; // when processing a video stream, need to make sure that processing of multiple // frames can overlap, since after this point only one warp of threads is executing // threads from only one warp will handle rightward aggregation across the // region that has been loaded into shared memory // for threads where threadIdx.y is 0, now threadIdx.x will index the rows if (threadIdx.y == 0) { int agg_row = threadIdx.x + blockIdx.y * blockDim.y; int start_K = K; int local_K = SHMEM_SIZE - 1; if (agg_row < m) { for(; (K > 0) && (K > (start_K - SHMEM_SIZE)); K--) { float d3 = MinArray[threadIdx.x][local_K] + (float) P2; int ind = agg_row * n + K - 1; for (int d = 0; d < D; d+=D_STEP){ dp[ind] += cost_image[ind] + dp_criteria(dp, ind+1, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); } local_K--; } } } __syncthreads(); col-=blockDim.x; } } // downward aggregation __global__ void __vertical_aggregate_down(float 
*dp, float *cost_image, int m, int n) { // which column of array to work on int col = blockDim.x * blockIdx.x + threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(col < n) { for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } // float prev_min = arr_min(arr, D_SIZE); float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } } col += blockDim.x; } } // upward aggreagtion __global__ void __vertical_aggregate_up(float *dp, float *cost_image, int m, int n) { // which column of array to work on int col = blockDim.x * blockIdx.x + threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(col < n) { for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * n + col; // calculate min cost disparity for this column from row-1 for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } // float prev_min = arr_min(arr, D_SIZE); float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } } col += blockDim.x; } } // aggregation along diagonal from top left to bottom right __global__ void __diagonal_tl_br_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on int start_col = blockDim.x * blockIdx.x + threadIdx.x + 1; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col < n) { int col = start_col; for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col - 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col - 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col += 1; if (col == n) 
// wrap each thread around once it gets to the last column col = 1; } start_col += blockDim.x; } } // aggregation along diagonal from top right to bottom left __global__ void __diagonal_tr_bl_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on // thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate // using data from columns n-1) int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col >= 0) { int col = start_col; for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col + 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col + 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col -= 1; if (col < 0) // wrap each thread around once it gets to the last column col = n-2; } start_col -= blockDim.x; } } // aggregation along diagonal from bottom right to top left __global__ void __diagonal_br_tl_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on // thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate // using data from columns n-1) int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col >= 0) { int col = start_col; for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * n + col + 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col + 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col -= 1; if (col < 0) // wrap each thread around once it gets to the last column col = n-2; } start_col -= blockDim.x; } } // aggregation along diagonal from top left to bottom right __global__ void __diagonal_bl_tr_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on int start_col = blockDim.x * blockIdx.x + threadIdx.x + 1; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col < n) { int col = start_col; for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * 
n + col - 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col - 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col += 1; if (col == n) // wrap each thread around once it gets to the last column col = 1; } start_col += blockDim.x; } } // takes min along depth dimension, puts output back in dp to save memory __global__ void __argmin_3d_mat(float * dp, int * stereo_im, int m, int n) { int col = blockDim.x * blockIdx.x + threadIdx.x; int imsize = m*n; int loop_limit = D*m*n; while(col < n) { int row = blockDim.y * blockIdx.y + threadIdx.y; while(row < m) { int min_ind = -1; float current_min = 100000000.0; int current_val = row * n + col; int v = 0; for (int depth = 0; depth < loop_limit; depth+=imsize){ if (dp[depth + current_val] < current_min) { min_ind = v; current_min = dp[depth + current_val]; } v++; } stereo_im[current_val] = min_ind; row+=blockDim.y; } col+=blockDim.x; } } // wrappers int * argmin(int nCols, int nRows, float * dp, int * stereo_im, hipStream_t stream){ dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, 1); hipLaunchKernelGGL(( __argmin_3d_mat), dim3(gridSize), dim3(blockSize), 0, stream, dp, stereo_im, nRows, nCols); return stereo_im; } float * r_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ int nblock = nRows / SHMEM_SIZE; dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, nblock); hipLaunchKernelGGL(( __r_aggregate), dim3(gridSize), dim3(blockSize), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * l_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ int nblock = nRows / SHMEM_SIZE; dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, nblock); hipLaunchKernelGGL(( __l_aggregate), dim3(gridSize), dim3(blockSize), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * vertical_aggregate_down(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ hipLaunchKernelGGL(( __vertical_aggregate_down), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * vertical_aggregate_up(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ hipLaunchKernelGGL(( __vertical_aggregate_up), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * diagonal_tl_br_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ hipLaunchKernelGGL(( __diagonal_tl_br_aggregate), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * diagonal_tr_bl_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ hipLaunchKernelGGL(( __diagonal_tr_bl_aggregate), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * diagonal_br_tl_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t 
stream){ hipLaunchKernelGGL(( __diagonal_br_tl_aggregate), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; } float * diagonal_bl_tr_aggregate(int nCols, int nRows, float * shifted_images, float * dp, hipStream_t stream){ hipLaunchKernelGGL(( __diagonal_bl_tr_aggregate), dim3(1), dim3(256), 0, stream, dp, shifted_images, nRows, nCols); return dp; }
6ffbb1d89bf32c20f713b86d84c79b1223b1ccc2.cu
//#define DEPTH 2 #include <stdio.h> //#include <cuda_runtime.h> #include "sgbm_helper.cuh" __device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){ *d_zero = dp[ind]; if (d > 0) *d_one = dp[ind - depth_dim_size] + P_one; else *d_one = 10000000; if (d < D-1) *d_two = dp[ind + depth_dim_size] + P_one; else *d_two = 10000000; return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two; } __device__ float __hamming_dist(unsigned long long a, unsigned long long b){ unsigned long long c = a^b; float z = 0; while (c != 0){ z += c & 1; c>>=1; } return z; } __device__ float __hamming_dist_int(unsigned int a, unsigned int b){ unsigned int c = a^b; float z = 0; while (c != 0){ z += c & 1; c>>=1; } return z; } #define XOR__(a,b) a = a^b; #define SHIFT1__(a) a = a - ((a >> 1) & 0x55555555); #define SHIFT2__(a) a = (a & 0x33333333) + ((a >> 2) & 0x33333333); #define SHIFT3__(a) a = (a + (a >> 4)) & 0xF0F0F0F; #define SHIFT4__(a) a = (a * 0x01010101) >> 24; __device__ float __hamming_dist_int_fast(unsigned int a, unsigned int b){ a = a^b; a = a - ((a >> 1) & 0x55555555); a = (a & 0x33333333) + ((a >> 2) & 0x33333333); a = (a + (a >> 4)) & 0xF0F0F0F; a = (a * 0x01010101) >> 24; return a; } float * device_shift_subtract_stack_baseline(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); __shift_subtract_stack_baseline<<<grid, block>>>(L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_base(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); __shift_subtract_stack_base<<<grid, block>>>(L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_level1(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = (rows + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); __shift_subtract_stack_level1pt5<<<grid, block>>>(L,R,out,rows,cols); return out; } float * device_shift_subtract_stack_level2(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int blockSize = 32; int gridX = (cols + blockSize - 1) / blockSize; int gridY = ((rows / 8) + blockSize - 1) / blockSize; dim3 grid(gridX, gridY); dim3 block(blockSize, blockSize, 1); __shift_subtract_stack_level2<<<grid, block>>>(L,R,out,rows,cols); return out; } __global__ void __shift_subtract_stack_base(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; for(int d = 0; d < D; d++) { if (j + d < cols) out[ind] = __hamming_dist_int(R[(ind % imsize) + d], L[ind % imsize]); else out[ind] = 1e7; ind += imsize; } } __global__ void __shift_subtract_stack_level1(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; for(int d = 0; d < D; d++) { if (j + d < cols) 
out[ind] = __hamming_dist_int_fast(R[(ind % imsize) + d], L[ind % imsize]); else out[ind] = 1e7; ind += imsize; } } __global__ void __shift_subtract_stack_level1pt5(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; for(int d = 0; d < D; d++) { if (j + d < cols) out[out_ind] = __hamming_dist_int_fast(R[ind + d], lval); else out[out_ind] = 1e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_level1pt7(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; __shared__ unsigned int shmem[32][32]; for(int d = 0; d < D; d++) { if (j + d < cols) shmem[threadIdx.y][threadIdx.x] = R[ind + d]; __syncthreads(); if (i + d < cols) shmem[threadIdx.x][threadIdx.y] = __hamming_dist_int_fast(lval, shmem[threadIdx.x][threadIdx.y]); __syncthreads(); if(j + d < cols) out[out_ind] = shmem[threadIdx.y][threadIdx.x]; else out[ind] = 10e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_baseline(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int lval = L[ind]; for(int d = 0; d < D; d++) { if (j + d < cols) out[out_ind] = R[ind + d] - lval; else out[out_ind] = 1e7; out_ind += imsize; } } __global__ void __shift_subtract_stack_level2(unsigned int * L, unsigned int * R, float * out, int rows, int cols) { int imsize = rows * cols; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int ind = i * cols + j; int out_ind = ind; int inc = gridDim.y * blockDim.y * cols; unsigned int b1 = L[ind]; unsigned int b2 = L[ind + inc]; unsigned int b3 = L[ind + 2 * inc]; unsigned int b4 = L[ind + 3 * inc]; unsigned int b5 = L[ind + 4 * inc]; unsigned int b6 = L[ind + 5 *inc]; unsigned int b7 = L[ind + 6 * inc]; unsigned int b8 = L[ind + 7 * inc]; for(int d = 0; d < D; d++) { if (j + d < cols) { unsigned int a1 = R[ind + d]; unsigned int a2 = R[ind + inc + d]; unsigned int a3 = R[ind + 2 * inc + d]; unsigned int a4 = R[ind + 3 * inc + d]; unsigned int a5 = R[ind + 4 * inc + d]; unsigned int a6 = R[ind + 5 * inc + d]; unsigned int a7 = R[ind + 6 * inc + d]; unsigned int a8 = R[ind + 7 * inc + d]; XOR__(a1,b1); XOR__(a2,b2); XOR__(a3,b3); XOR__(a4,b4); XOR__(a5,b5); XOR__(a6,b6); XOR__(a7,b7); XOR__(a8,b8); SHIFT1__(a1); SHIFT1__(a2); SHIFT1__(a3); SHIFT1__(a4); SHIFT1__(a5); SHIFT1__(a6); SHIFT1__(a7); SHIFT1__(a8); SHIFT2__(a1); SHIFT2__(a2); SHIFT2__(a3); SHIFT2__(a4); SHIFT2__(a5); SHIFT2__(a6); SHIFT2__(a7); SHIFT2__(a8); SHIFT3__(a1); SHIFT3__(a2); SHIFT3__(a3); SHIFT3__(a4); SHIFT3__(a5); SHIFT3__(a6); SHIFT3__(a7); SHIFT3__(a8); SHIFT4__(a1); SHIFT4__(a2); SHIFT4__(a3); SHIFT4__(a4); SHIFT4__(a5); SHIFT4__(a6); SHIFT4__(a7); SHIFT4__(a8); out[out_ind] = a1; out[out_ind + inc] = a2; out[out_ind + 2 * inc] = a3; out[out_ind + 3 * inc] = a4; out[out_ind + 4 * inc] = a5; out[out_ind + 5 * inc] = a6; out[out_ind + 6 * inc] = a7; out[out_ind + 7 * inc] = a8; } else { out[out_ind] = 1e7; out[out_ind + inc] = 1e7; out[out_ind + 2 * inc] = 1e7; out[out_ind + 3 * inc] = 1e7; out[out_ind + 4 * 
inc] = 1e7; out[out_ind + 5 * inc] = 1e7; out[out_ind + 6 * inc] = 1e7; out[out_ind + 7 * inc] = 1e7; } out_ind += imsize; } } // right aggregation __global__ void __r_aggregate(float *dp, float *cost_image, int m, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x; int depth_dim_size = m*n; __shared__ float MinArray[SHMEM_SIZE][SHMEM_SIZE]; int K = 0; // this variable keeps track of the progress in aggregating // across the columns of the image while ((col < n) & (row < m)) { int ind = row * n + col; float prev_min = 100000000.0; for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } MinArray[threadIdx.y][threadIdx.x] = prev_min; __syncthreads(); float d0 = 0; float d1 = 0; float d2 = 0; // when processing a video stream, need to make sure that processing of multiple // frames can overlap, since after this point only one warp of threads is executing // threads from only one warp will handle rightward aggregation across the // region that has been loaded into shared memory // for threads where threadIdx.y is 0, now threadIdx.x will index the rows if (threadIdx.y == 0) { int agg_row = threadIdx.x + blockIdx.y * blockDim.y; int start_K = K; int local_K = 0; if (agg_row < m) { for(; (K < (n - 1)) && (K < (start_K + SHMEM_SIZE)); K++) { float d3 = MinArray[threadIdx.x][local_K] + (float) P2; int ind = agg_row * n + K + 1; for (int d = 0; d < D; d+=D_STEP){ dp[ind] += cost_image[ind] + dp_criteria(dp, ind-1, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); } local_K++; } } } __syncthreads(); col+=blockDim.x; } } // left aggregation __global__ void __l_aggregate(float *dp, float *cost_image, int m, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = n - 1 - threadIdx.x; int depth_dim_size = m*n; __shared__ float MinArray[SHMEM_SIZE][SHMEM_SIZE]; int K = n-1; // this variable keeps track of the progress in aggregating // across the columns of the image while ((col >= 0) & (row < m)) { int ind = row * n + col; float prev_min = 100000000.0; for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } MinArray[threadIdx.y][SHMEM_SIZE - 1 - threadIdx.x] = prev_min; __syncthreads(); float d0 = 0; float d1 = 0; float d2 = 0; // when processing a video stream, need to make sure that processing of multiple // frames can overlap, since after this point only one warp of threads is executing // threads from only one warp will handle rightward aggregation across the // region that has been loaded into shared memory // for threads where threadIdx.y is 0, now threadIdx.x will index the rows if (threadIdx.y == 0) { int agg_row = threadIdx.x + blockIdx.y * blockDim.y; int start_K = K; int local_K = SHMEM_SIZE - 1; if (agg_row < m) { for(; (K > 0) && (K > (start_K - SHMEM_SIZE)); K--) { float d3 = MinArray[threadIdx.x][local_K] + (float) P2; int ind = agg_row * n + K - 1; for (int d = 0; d < D; d+=D_STEP){ dp[ind] += cost_image[ind] + dp_criteria(dp, ind+1, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); } local_K--; } } } __syncthreads(); col-=blockDim.x; } } // downward aggregation __global__ void __vertical_aggregate_down(float *dp, float *cost_image, int m, int n) { // which column of array to work on int col = blockDim.x * blockIdx.x + threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // 
slices while(col < n) { for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } // float prev_min = arr_min(arr, D_SIZE); float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } } col += blockDim.x; } } // upward aggreagtion __global__ void __vertical_aggregate_up(float *dp, float *cost_image, int m, int n) { // which column of array to work on int col = blockDim.x * blockIdx.x + threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(col < n) { for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * n + col; // calculate min cost disparity for this column from row-1 for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } // float prev_min = arr_min(arr, D_SIZE); float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } } col += blockDim.x; } } // aggregation along diagonal from top left to bottom right __global__ void __diagonal_tl_br_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on int start_col = blockDim.x * blockIdx.x + threadIdx.x + 1; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col < n) { int col = start_col; for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col - 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col - 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col += 1; if (col == n) // wrap each thread around once it gets to the last column col = 1; } start_col += blockDim.x; } } // aggregation along diagonal from top right to bottom left __global__ void __diagonal_tr_bl_aggregate(float *dp, float 
*cost_image, int m, int n) { // which column of array to work on // thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate // using data from columns n-1) int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col >= 0) { int col = start_col; for (int row = 1; row < m; row++) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row - 1) * n + col + 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row - 1) * n + col + 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col -= 1; if (col < 0) // wrap each thread around once it gets to the last column col = n-2; } start_col -= blockDim.x; } } // aggregation along diagonal from bottom right to top left __global__ void __diagonal_br_tl_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on // thread with blockIdx.x == 0 and threadIdx.x == 0 will start at column n-2 (and aggregate // using data from columns n-1) int start_col = (n - 2) - (blockDim.x * blockIdx.x) - threadIdx.x; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col >= 0) { int col = start_col; for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * n + col + 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col + 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col -= 1; if (col < 0) // wrap each thread around once it gets to the last column col = n-2; } start_col -= blockDim.x; } } // aggregation along diagonal from top left to bottom right __global__ void __diagonal_bl_tr_aggregate(float *dp, float *cost_image, int m, int n) { // which column of array to work on int start_col = blockDim.x * blockIdx.x + threadIdx.x + 1; int depth_dim_size = m*n; // todo: maybe it will work better to take running average of every d // slices while(start_col < n) { int col = start_col; for (int row = m-2; row >= 0; row--) { //int arr_ind = 0; float prev_min = 100000000.0; int ind = (row + 1) * n + col - 1; // calculate min cost disparity for this column from row-1 //#pragma unroll for (int depth = 0; depth < D; depth+=D_STEP){ prev_min = fminf(dp[ind], prev_min); ind += (depth_dim_size * D_STEP); } float d0 = 
0; float d1 = 0; float d2 = 0; float d3 = prev_min + (float) P2; ind = (row + 1) * n + col - 1; int current_ind = row * n + col; // todo: try having this loop go from 1 to d-1 and removing the if else for (int d = 0; d < D; d+=D_STEP){ // for each d I need dp[{d-1, d, d+1}, row-1, col], dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3); ind += (depth_dim_size * D_STEP); current_ind += (depth_dim_size * D_STEP); } col += 1; if (col == n) // wrap each thread around once it gets to the last column col = 1; } start_col += blockDim.x; } } // takes min along depth dimension, puts output back in dp to save memory __global__ void __argmin_3d_mat(float * dp, int * stereo_im, int m, int n) { int col = blockDim.x * blockIdx.x + threadIdx.x; int imsize = m*n; int loop_limit = D*m*n; while(col < n) { int row = blockDim.y * blockIdx.y + threadIdx.y; while(row < m) { int min_ind = -1; float current_min = 100000000.0; int current_val = row * n + col; int v = 0; for (int depth = 0; depth < loop_limit; depth+=imsize){ if (dp[depth + current_val] < current_min) { min_ind = v; current_min = dp[depth + current_val]; } v++; } stereo_im[current_val] = min_ind; row+=blockDim.y; } col+=blockDim.x; } } // wrappers int * argmin(int nCols, int nRows, float * dp, int * stereo_im, cudaStream_t stream){ dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, 1); __argmin_3d_mat<<<gridSize, blockSize, 0, stream>>>(dp, stereo_im, nRows, nCols); return stereo_im; } float * r_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ int nblock = nRows / SHMEM_SIZE; dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, nblock); __r_aggregate<<<gridSize, blockSize, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * l_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ int nblock = nRows / SHMEM_SIZE; dim3 blockSize = dim3(SHMEM_SIZE, SHMEM_SIZE, 1); dim3 gridSize = dim3(1, nblock); __l_aggregate<<<gridSize, blockSize, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * vertical_aggregate_down(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __vertical_aggregate_down<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * vertical_aggregate_up(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __vertical_aggregate_up<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * diagonal_tl_br_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __diagonal_tl_br_aggregate<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * diagonal_tr_bl_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __diagonal_tr_bl_aggregate<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * diagonal_br_tl_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __diagonal_br_tl_aggregate<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; } float * diagonal_bl_tr_aggregate(int nCols, int nRows, float * shifted_images, float * dp, cudaStream_t stream){ __diagonal_bl_tr_aggregate<<<1, 256, 0, stream>>>(dp, shifted_images, nRows, nCols); return dp; }
8b678c072fb03a30426c155ee5ab0fa51cba28b6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "util.hpp" #include "cuda_stream.hpp" #include "cuda_event.hpp" __global__ void blur(const double *in, double* out, int n) { auto i = threadIdx.x + blockDim.x * blockIdx.x + 1; if(i<n-1) { out[i] = 0.25*(in[i-1] + 2.0*in[i] + in[i+1]); } } template <int THREADS> __global__ void blur_twice(const double *in, double* out, int n) { __shared__ double buffer[THREADS+4]; auto block_start = blockDim.x * blockIdx.x; auto block_end = block_start + blockDim.x; auto lid = threadIdx.x; auto gid = lid + block_start; auto blur = [] (int pos, double const* field) { return 0.25*(field[pos-1] + 2.0*field[pos] + field[pos+1]); }; if(gid<n-4) { auto li = lid+2; auto gi = gid+2; buffer[li] = blur(gi, in); if(threadIdx.x==0) { buffer[1] = blur(block_start+1, in); buffer[blockDim.x+2] = blur(block_end+2, in); } __syncthreads(); out[gi] = blur(li, buffer); } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 20); size_t nsteps = read_arg(argc, argv, 2, 100); bool fuse_loops = read_arg(argc, argv, 3, false); size_t n = (1 << pow) + 4; auto size_in_bytes = n * sizeof(double); std::cout << "dispersion 1D test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) << "MB" << std::endl; auto x_host = malloc_host<double>(n, 0.); // set boundary conditions to 1 x_host[0] = 1.0; x_host[1] = 1.0; x_host[n-2] = 1.0; x_host[n-1] = 1.0; auto x0 = malloc_device<double>(n); auto x1 = malloc_device<double>(n); // copy initial conditions to device copy_to_device<double>(x_host, x0, n); copy_to_device<double>(x_host, x1, n); // find the launch grid configuration constexpr auto block_dim = 128; auto grid_dim = (n-4)/block_dim + ((n-4)%block_dim ? 1 : 0); auto shared_size = sizeof(double)*(block_dim+4); cuda_stream stream; auto start_event = stream.enqueue_event(); for(auto step=0; step<nsteps; ++step) { if (fuse_loops) { hipLaunchKernelGGL(( blur_twice<block_dim>), dim3(grid_dim), dim3(block_dim), shared_size, 0, x0, x1, n); } else { hipLaunchKernelGGL(( blur), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, n); hipLaunchKernelGGL(( blur), dim3(grid_dim), dim3(block_dim), 0, 0, x0+1, x1+1, n-2); } std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); // copy result back to host copy_to_host<double>(x0, x_host, n); stop_event.wait(); auto time = stop_event.time_since(start_event); std::cout << "==== " << time << " seconds : " << 1e3*time/nsteps << " ms/step\n"; return 0; }
8b678c072fb03a30426c155ee5ab0fa51cba28b6.cu
#include <iostream> #include <cuda.h> #include "util.hpp" #include "cuda_stream.hpp" #include "cuda_event.hpp" __global__ void blur(const double *in, double* out, int n) { auto i = threadIdx.x + blockDim.x * blockIdx.x + 1; if(i<n-1) { out[i] = 0.25*(in[i-1] + 2.0*in[i] + in[i+1]); } } template <int THREADS> __global__ void blur_twice(const double *in, double* out, int n) { __shared__ double buffer[THREADS+4]; auto block_start = blockDim.x * blockIdx.x; auto block_end = block_start + blockDim.x; auto lid = threadIdx.x; auto gid = lid + block_start; auto blur = [] (int pos, double const* field) { return 0.25*(field[pos-1] + 2.0*field[pos] + field[pos+1]); }; if(gid<n-4) { auto li = lid+2; auto gi = gid+2; buffer[li] = blur(gi, in); if(threadIdx.x==0) { buffer[1] = blur(block_start+1, in); buffer[blockDim.x+2] = blur(block_end+2, in); } __syncthreads(); out[gi] = blur(li, buffer); } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 20); size_t nsteps = read_arg(argc, argv, 2, 100); bool fuse_loops = read_arg(argc, argv, 3, false); size_t n = (1 << pow) + 4; auto size_in_bytes = n * sizeof(double); std::cout << "dispersion 1D test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) << "MB" << std::endl; auto x_host = malloc_host<double>(n, 0.); // set boundary conditions to 1 x_host[0] = 1.0; x_host[1] = 1.0; x_host[n-2] = 1.0; x_host[n-1] = 1.0; auto x0 = malloc_device<double>(n); auto x1 = malloc_device<double>(n); // copy initial conditions to device copy_to_device<double>(x_host, x0, n); copy_to_device<double>(x_host, x1, n); // find the launch grid configuration constexpr auto block_dim = 128; auto grid_dim = (n-4)/block_dim + ((n-4)%block_dim ? 1 : 0); auto shared_size = sizeof(double)*(block_dim+4); cuda_stream stream; auto start_event = stream.enqueue_event(); for(auto step=0; step<nsteps; ++step) { if (fuse_loops) { blur_twice<block_dim><<<grid_dim, block_dim, shared_size>>>(x0, x1, n); } else { blur<<<grid_dim, block_dim>>>(x0, x1, n); blur<<<grid_dim, block_dim>>>(x0+1, x1+1, n-2); } std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); // copy result back to host copy_to_host<double>(x0, x_host, n); stop_event.wait(); auto time = stop_event.time_since(start_event); std::cout << "==== " << time << " seconds : " << 1e3*time/nsteps << " ms/step\n"; return 0; }
51740a2b307ccee844194625073aee2a5dd257b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated ds Tue Aug 13 16:45:09 2013 */ #include "common_magma.h" #define num_threadds 64 __global__ void dslaswp_kernel(int n, double *a, int lda, float *sa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*num_threadds + threadIdx.x; int newind; float res; if (ind < m) { sa += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i<n; i++) { res = MAGMA_S_MAKE( (float)(a[newind+i*lda]), (float)(a[newind+i*lda]) ); sa[i*lda] = res; } } } __global__ void dslaswp_inv_kernel(int n, double *a, int lda, float *sa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*num_threadds + threadIdx.x; int newind; double res; if (ind < m) { a += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i<n; i++) { res = MAGMA_D_MAKE( (double)(sa[newind+i*lda]), (double)(sa[newind+i*lda]) ); a[i*lda] = res; } } } extern "C" void magmablas_dslaswp( magma_int_t n, double *a, magma_int_t lda, float *sa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= Row i of A is casted to single precision in row ipiv[i] of SA, 0<=i<m. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A. A - (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the matrix of column dimension N and row dimension M to which the row interchanges will be applied. LDA - (input) INTEGER. LDA specifies the leading dimension of A. SA - (output) REAL array on the GPU, dimension (LDA,N) On exit, the casted to single precision and permuted matrix. M - (input) The number of rows to be interchanged. IPIV - (input) INTEGER array, dimension (M) The vector of pivot indices. Row i of A is casted to single precision in row ipiv[i] of SA, 0<=i<m. INCX - (input) INTEGER If IPIV is negative, the pivots are applied in reverse order, otherwise in straight-forward order. ===================================================================== */ int blocks; if (m % num_threadds==0) blocks = m/num_threadds; else blocks = m/num_threadds + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threadds, 1, 1); if (incx >=0) hipLaunchKernelGGL(( dslaswp_kernel), dim3(grid), dim3(threads), 0, magma_stream , n, a, lda, sa, m, ipiv); else hipLaunchKernelGGL(( dslaswp_inv_kernel), dim3(grid), dim3(threads), 0, magma_stream , n, a, lda, sa, m, ipiv); } #undef num_threadds
51740a2b307ccee844194625073aee2a5dd257b1.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated ds Tue Aug 13 16:45:09 2013 */ #include "common_magma.h" #define num_threadds 64 __global__ void dslaswp_kernel(int n, double *a, int lda, float *sa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*num_threadds + threadIdx.x; int newind; float res; if (ind < m) { sa += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i<n; i++) { res = MAGMA_S_MAKE( (float)(a[newind+i*lda]), (float)(a[newind+i*lda]) ); sa[i*lda] = res; } } } __global__ void dslaswp_inv_kernel(int n, double *a, int lda, float *sa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*num_threadds + threadIdx.x; int newind; double res; if (ind < m) { a += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i<n; i++) { res = MAGMA_D_MAKE( (double)(sa[newind+i*lda]), (double)(sa[newind+i*lda]) ); a[i*lda] = res; } } } extern "C" void magmablas_dslaswp( magma_int_t n, double *a, magma_int_t lda, float *sa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= Row i of A is casted to single precision in row ipiv[i] of SA, 0<=i<m. N - (input) INTEGER. On entry, N specifies the number of columns of the matrix A. A - (input) DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the matrix of column dimension N and row dimension M to which the row interchanges will be applied. LDA - (input) INTEGER. LDA specifies the leading dimension of A. SA - (output) REAL array on the GPU, dimension (LDA,N) On exit, the casted to single precision and permuted matrix. M - (input) The number of rows to be interchanged. IPIV - (input) INTEGER array, dimension (M) The vector of pivot indices. Row i of A is casted to single precision in row ipiv[i] of SA, 0<=i<m. INCX - (input) INTEGER If IPIV is negative, the pivots are applied in reverse order, otherwise in straight-forward order. ===================================================================== */ int blocks; if (m % num_threadds==0) blocks = m/num_threadds; else blocks = m/num_threadds + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threadds, 1, 1); if (incx >=0) dslaswp_kernel<<< grid, threads, 0, magma_stream >>>(n, a, lda, sa, m, ipiv); else dslaswp_inv_kernel<<< grid, threads, 0, magma_stream >>>(n, a, lda, sa, m, ipiv); } #undef num_threadds
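Taken together, this pair shows the one mechanical change hipify makes to the file: each CUDA triple-chevron launch `kernel<<<grid, threads, shmem, stream>>>(args...)` becomes `hipLaunchKernelGGL(kernel, grid, threads, shmem, stream, args...)`, while the kernel bodies and the host-side logic are left untouched. A minimal, self-contained sketch of that mapping follows; the `scale` kernel and `main` are hypothetical and not part of the MAGMA sources above.

// Hypothetical illustration of the launch-syntax mapping; not MAGMA code.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(int n, double alpha, double *x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

int main() {
    const int n = 1 << 10;
    double *d_x = NULL;
    cudaMalloc(&d_x, n * sizeof(double));        // HIP: hipMalloc(&d_x, n * sizeof(double));
    cudaMemset(d_x, 0, n * sizeof(double));      // HIP: hipMemset(d_x, 0, n * sizeof(double));

    dim3 threads(64);
    dim3 grid((n + 63) / 64);

    // CUDA: execution configuration in triple chevrons.
    scale<<<grid, threads, 0, 0>>>(n, 2.0, d_x);
    // HIP, as emitted by hipify: the same configuration passed as leading arguments.
    //   hipLaunchKernelGGL(scale, grid, threads, 0, 0, n, 2.0, d_x);

    cudaDeviceSynchronize();                     // HIP: hipDeviceSynchronize();
    cudaFree(d_x);                               // HIP: hipFree(d_x);
    printf("launched scale on %d elements\n", n);
    return 0;
}

Note how the stream argument (`magma_stream` in the MAGMA file, the default stream `0` in this sketch) keeps the same position in both forms.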
cdca84a31f2f9ccc0fae79a84218bfd7a3310901.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/hip/Normalization.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return self.is_contiguous(at::MemoryFormat::ChannelsLast) || self.ndimension() == 2; } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if ((!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_output(out) .add_input(self) .add_input(weight) .add_input(bias) .add_input(mean) .add_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_output(save_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::hip::compat::rsqrt(var + eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { 
auto iter = TensorIteratorConfig() .add_output(out_invstd) .add_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::hip::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_mean_opt.has_value() && running_mean_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); const Tensor& save_mean = c10::value_or_else(save_mean_opt, [] {return Tensor();}); const Tensor& save_invstd = c10::value_or_else(save_invstd_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, 
at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, self.suggest_memory_format()); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> 
running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; // self is grad_output if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_reduce_cuda_channels_last_template(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, 
mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
cdca84a31f2f9ccc0fae79a84218bfd7a3310901.cu
#include <ATen/native/TensorIterator.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/cuda/Normalization.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return self.is_contiguous(at::MemoryFormat::ChannelsLast) || self.ndimension() == 2; } enum class Impl { Contiguous, ChannelsLast, General, }; inline Impl batch_norm_choose_impl(const Tensor& self) { if (!at::cuda::detail::canUse32BitIndexMath(self)) { return Impl::General; } if (self.is_contiguous()) { return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; } if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { return Impl::ChannelsLast; } return Impl::General; } void batch_norm_elementwise( const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>( out, self, *weight, *bias, mean_, invstd_); }); return; } case Impl::ChannelsLast: { auto weight = at::borrow_from_optional_tensor(weight_opt); auto bias = at::borrow_from_optional_tensor(bias_opt); if ((!weight->defined() || weight->is_contiguous()) && (!bias->defined() || bias->is_contiguous()) && (!mean_.defined() || mean_.is_contiguous()) && (!invstd_.defined() || invstd_.is_contiguous())) { batch_norm_elemt_channels_last_cuda_template( out, self, *weight, *bias, mean_, invstd_); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector sizes(ndim, 1), strides(ndim, 0); // Helper to convert 1d tensors to an nd tensor that broadcasts with input // All elements go into the channel dimension auto as_nd = [&](const Tensor& t) { TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); sizes[1] = t.sizes()[0]; strides[1] = t.strides()[0]; return t.as_strided(sizes, strides); }; auto weight = weight_opt.has_value() && weight_opt->defined() ? as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); auto bias = bias_opt.has_value() && bias_opt->defined() ? as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); auto mean = as_nd(mean_); auto invstd = as_nd(invstd_); auto iter = TensorIteratorConfig() .add_output(out) .add_input(self) .add_input(weight) .add_input(bias) .add_input(mean) .add_input(invstd) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "batch_norm_elementwise_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, acc_t mean, acc_t invstd) -> scalar_t { return ((input - mean) * invstd) * weight + bias; }); }); return; } } } void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
const double dummy_epsilon = 1e-5; switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_cuda_template<scalar_t, int32_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } case Impl::ChannelsLast: { if ((!save_mean.defined() || save_mean.is_contiguous()) && (!save_var.defined() || save_var.is_contiguous())) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { batch_norm_stats_channels_last_cuda_template<scalar_t, Var>( save_mean, save_var, self, dummy_epsilon); }); return; } C10_FALLTHROUGH; } case Impl::General: { const int64_t ndim = self.dim(); DimVector reduce_dims(ndim - 1); reduce_dims[0] = 0; for (int64_t i = 2; i < ndim; ++i) { reduce_dims[i - 1] = i; } // For some reason this isn't an actual operator but it exists anyway... at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, /*unbiased=*/false, /*keepdim=*/false); return; } } } void batch_norm_update_stats( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, }; }); }); } void batch_norm_update_stats_and_invert( const Tensor& save_mean, const Tensor& save_var, const Tensor& running_mean, const Tensor& running_var, double momentum_, double epsilon, int64_t N) { auto iter = TensorIteratorConfig() .add_output(running_mean) .add_output(running_var) .add_output(save_var) .add_input(save_mean) .add_input(save_var) .add_input(running_mean) .add_input(running_var) .check_all_same_dtype(false) .promote_inputs_to_common_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; const auto bessel_correction_factor = static_cast<acc_t>( static_cast<double>(N) / static_cast<double>(N - 1)); const auto eps = static_cast<acc_t>(epsilon); const auto momentum = static_cast<acc_t>(momentum_); gpu_kernel_multiple_outputs( iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) -> thrust::tuple<scalar_t, scalar_t, acc_t> { const auto unbiased_var = var * bessel_correction_factor; return thrust::tuple<scalar_t, scalar_t, acc_t>{ mean * momentum + (1 - momentum) * running_mean, unbiased_var * momentum + (1 - momentum) * running_var, c10::cuda::compat::rsqrt(var + eps) }; }); }); } void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { 
auto iter = TensorIteratorConfig() .add_output(out_invstd) .add_input(running_var) .check_all_same_dtype(false) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), "batch_norm_invert_std_cuda", [&] { using acc_t = at::acc_type<scalar_t, true>; auto eps = static_cast<acc_t>(epsilon); gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { return c10::cuda::compat::rsqrt(var + eps); }); }); } } std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_mean_opt.has_value() && running_mean_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); if (train) { batch_norm_mean_var(self, save_mean, save_invstd); if (has_running_mean) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats_and_invert( save_mean, save_invstd, *running_mean_opt, *running_var_opt, momentum, epsilon, N); } else { batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); } } else { TORCH_CHECK(has_running_mean); at::native::resize_output(save_mean, running_mean_opt->sizes()); save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); } batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_invstd = at::empty({n_input}, options); at::native::batch_norm_cuda_out( self, weight_opt, bias_opt, running_mean_opt, running_var_opt, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); const Tensor& save_mean = c10::value_or_else(save_mean_opt, [] {return Tensor();}); const Tensor& save_invstd = c10::value_or_else(save_invstd_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, 
at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto n_channels = self.size(1); auto save_mean = at::empty({n_channels}, options); auto save_invstd = at::empty({n_channels}, options); bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>( save_mean, save_invstd, self, epsilon); } else { batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>( save_mean, save_invstd, self, epsilon); } } else { batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>( save_mean, save_invstd, self, epsilon); } }); return std::tuple<Tensor, Tensor>(save_mean, save_invstd); } Tensor batch_norm_elemt_cuda( const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, self.suggest_memory_format()); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> 
running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; // self is grad_output if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_reduce_cuda_channels_last_template(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, 
mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt); const int64_t n_input = self.size(1); auto options = self.options().dtype( at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); auto save_mean = at::empty({n_input}, options); auto save_var = at::empty({n_input}, options); batch_norm_mean_var(self, save_mean, save_var); TORCH_CHECK(running_mean->defined() == running_var->defined()); if (running_mean->defined()) { const int64_t N = self.numel() / save_mean.numel(); batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); } return std::tuple<Tensor, Tensor>(save_mean, save_var); } } } // namespace at::native
8ef0d0d84dd3ee49475507f074287c3d8b1eb06a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_sqrtweights.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int N = XSIZE*YSIZE;
            double *wt = NULL;
            hipMalloc(&wt, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( kernel_sqrtweights), dim3(gridBlock),dim3(threadBlock), 0, 0, N,wt);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( kernel_sqrtweights), dim3(gridBlock),dim3(threadBlock), 0, 0, N,wt);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( kernel_sqrtweights), dim3(gridBlock),dim3(threadBlock), 0, 0, N,wt);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
8ef0d0d84dd3ee49475507f074287c3d8b1eb06a.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_sqrtweights.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int N = XSIZE*YSIZE;
            double *wt = NULL;
            cudaMalloc(&wt, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            kernel_sqrtweights<<<gridBlock,threadBlock>>>(N,wt);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kernel_sqrtweights<<<gridBlock,threadBlock>>>(N,wt);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kernel_sqrtweights<<<gridBlock,threadBlock>>>(N,wt);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
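A caveat on the harness in this pair: the timed loop only enqueues 1000 asynchronous launches and reads `end` without a final synchronization, so part of the measured work may still be in flight when the clock stops. Below is a minimal sketch of device-event timing that does wait for completion. It is an assumed alternative, not part of the generated file; `busy` is a hypothetical stand-in for `kernel_sqrtweights`, and hipify would map the `cudaEvent*` calls to their `hipEvent*` counterparts.

// Assumed alternative timing sketch -- not part of the generated benchmark above.
// `busy` is a hypothetical stand-in for kernel_sqrtweights.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(int n, double *x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.5 + 1.0;
}

int main() {
    const int n = 1 << 20;
    const int iters = 1000;
    double *d_x = NULL;
    cudaMalloc(&d_x, n * sizeof(double));                 // HIP: hipMalloc

    cudaEvent_t start, stop;                              // HIP: hipEvent_t
    cudaEventCreate(&start);                              // HIP: hipEventCreate
    cudaEventCreate(&stop);

    dim3 block(256), grid((n + 255) / 256);
    busy<<<grid, block>>>(n, d_x);                        // warm-up launch
    cudaDeviceSynchronize();                              // HIP: hipDeviceSynchronize

    cudaEventRecord(start);                               // HIP: hipEventRecord
    for (int i = 0; i < iters; ++i) {
        busy<<<grid, block>>>(n, d_x);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                           // wait for the last kernel to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);               // HIP: hipEventElapsedTime
    printf("%.3f us per launch\n", 1000.0f * ms / iters);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);                                        // HIP: hipFree
    return 0;
}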
6c44d0b0826757921196f4dd557ce69a5db5bbfa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; //#define BENCH_PRINT void init(int argc, char** argv) { if(argc==4){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } // data = new int[rows*cols]; hipHostMalloc((void **)&data,rows*cols * sizeof(int)); // wall = new int*[rows]; hipHostMalloc((void**)&wall, rows * sizeof(int*)); for(int n=0; n<rows; n++) wall[n]=data+cols*n; // result = new int[cols]; hipHostMalloc((void **)&result,cols * sizeof(int)); int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%d ",wall[i][j]) ; } printf("\n") ; } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } return dst; } int main(int argc, char** argv) { int num_devices; hipGetDeviceCount(&num_devices); if (num_devices > 1) hipSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; hipMalloc((void**)&gpuResult[0], sizeof(int)*cols); hipMalloc((void**)&gpuResult[1], sizeof(int)*cols); hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice); hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice); int final_ret; for (int i = 0; i < 1; i ++) final_ret = calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost); #ifdef BENCH_PRINT for (int i = 0; i < cols; i++) printf("%d ",data[i]) ; printf("\n") ; for (int i = 0; i < cols; i++) printf("%d ",result[i]) ; printf("\n") ; #endif hipFree(gpuWall); hipFree(gpuResult[0]); hipFree(gpuResult[1]); // delete [] data; // delete [] wall; // delete [] result; hipFree(data); hipFree(result); hipFree(wall); }
6c44d0b0826757921196f4dd557ce69a5db5bbfa.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; //#define BENCH_PRINT void init(int argc, char** argv) { if(argc==4){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } // data = new int[rows*cols]; cudaMallocHost((void **)&data,rows*cols * sizeof(int)); // wall = new int*[rows]; cudaMallocHost((void**)&wall, rows * sizeof(int*)); for(int n=0; n<rows; n++) wall[n]=data+cols*n; // result = new int[cols]; cudaMallocHost((void **)&result,cols * sizeof(int)); int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%d ",wall[i][j]) ; } printf("\n") ; } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; dynproc_kernel<<<dimGrid, dimBlock>>>( MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } return dst; } int main(int argc, char** argv) { int num_devices; cudaGetDeviceCount(&num_devices); if (num_devices > 1) cudaSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols); cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols); cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice); cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice); int final_ret; for (int i = 0; i < 1; i ++) final_ret = calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost); #ifdef BENCH_PRINT for (int i = 0; i < cols; i++) printf("%d ",data[i]) ; printf("\n") ; for (int i = 0; i < cols; i++) printf("%d ",result[i]) ; printf("\n") ; #endif cudaFree(gpuWall); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); // delete [] data; // delete [] wall; // delete [] result; cudaFree(data); cudaFree(result); cudaFree(wall); }
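Most renames in this pair are plain prefix swaps (`cudaMalloc` → `hipMalloc`, `cudaMemcpy` → `hipMemcpy`), but pinned host allocation changes shape: `cudaMallocHost(...)` becomes `hipHostMalloc(...)` with the same arguments. The following is a small, self-contained sketch of pinned allocation plus an asynchronous copy; the buffers are hypothetical rather than taken from the pathfinder sources, and the HIP equivalents are noted in comments.

// Hypothetical pinned-memory example; not part of the pathfinder sources above.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const size_t n = 1 << 20;
    int *h_buf = NULL;
    // CUDA: cudaMallocHost.  HIP (as emitted by hipify above): hipHostMalloc.
    cudaMallocHost((void **)&h_buf, n * sizeof(int));
    for (size_t i = 0; i < n; ++i) h_buf[i] = (int)i;

    int *d_buf = NULL;
    cudaMalloc(&d_buf, n * sizeof(int));                  // HIP: hipMalloc
    // Pinned host memory allows the copy to be truly asynchronous on a stream.
    cudaMemcpyAsync(d_buf, h_buf, n * sizeof(int),
                    cudaMemcpyHostToDevice, 0);           // HIP: hipMemcpyAsync, hipMemcpyHostToDevice
    cudaDeviceSynchronize();                              // HIP: hipDeviceSynchronize

    cudaFree(d_buf);                                      // HIP: hipFree
    cudaFreeHost(h_buf);                                  // HIP: hipHostFree
    printf("copied %zu ints through pinned memory\n", n);
    return 0;
}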
534bfff090b2723791f650da39b43ae5e398a68d.hip
// !!! This is a file automatically generated by hipify!!! // // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/gkf_opt.h> #include <oz/generate.h> #include <oz/gpu_sampler2.h> namespace oz { template<typename T> struct imp_gkf_opt8 : public generator<T> { gpu_sampler<T,0> src_; gpu_sampler<float4,1> krnl_; float radius_; float q_; float threshold_; imp_gkf_opt8( const gpu_image& src, const gpu_image& krnl, float radius, float q, float threshold ) : src_(src), krnl_(krnl,hipFilterModeLinear, hipAddressModeWrap, true), radius_(radius), q_(q), threshold_(threshold) {} inline __device__ T operator()( int ix, int iy) const { T m[8]; T s[8]; float w[8]; { T c = src_(ix, iy); float wx = krnl_(0,0).x; #pragma unroll for (int k = 0; k < 8; ++k) { m[k] = c * wx; s[k] = c * c * wx; w[k] = wx; } } float piN = 2 * CUDART_PI_F / 8; float4 RpiN = make_float4(cosf(piN), sinf(piN), -sinf(piN), cosf(piN)); int r = (int)ceilf(radius_); for (int j = 0; j <= r; ++j) { for (int i = -r; i <= r; ++i) { if ((j !=0) || (i > 0)) { float2 v = make_float2( 0.5f * i / radius_, 0.5f * j / radius_); float dot_v = dot(v,v); if (dot_v <= 0.25f) { T c0 = src_(ix + i, iy +j); T c1 = src_(ix - i, iy -j); T cc0 = c0 * c0; T cc1 = c1 * c1; float4 tmp0 = krnl_(v); float4 tmp1 = krnl_(-v); float wx[8] = { tmp0.x, tmp0.y, tmp0.z, tmp0.w, tmp1.x, tmp1.y, tmp1.z, tmp1.w }; #pragma unroll for (int k = 0; k < 4; ++k) { m[k] += c0 * wx[k] + c1 * wx[k+4]; s[k] += cc0 * wx[k] + cc1 * wx[k+4]; w[k] += wx[k] + wx[k+4]; m[k+4] += c1 * wx[k] + c0 * wx[k+4]; s[k+4] += cc1 * wx[k] + cc0 * wx[k+4]; w[k+4] += wx[k] + wx[k+4]; } } } } } T o = make_zero<T>(); float ow = 0; #pragma unroll for (int k = 0; k < 8; ++k) { m[k] /= w[k]; s[k] = fabs(s[k] / w[k] - m[k] * m[k]); float sigma2 = fmaxf(threshold_, sqrtf(sum(s[k]))); float wk = __powf(sigma2, -q_); o += m[k] * wk; ow += wk; } return o / ow; } }; gpu_image gkf_opt8_filter( const gpu_image& src, const gpu_image& krnl, float radius, float q, float threshold ) { switch (src.format()) { case FMT_FLOAT: return generate(src.size(), imp_gkf_opt8<float >(src, krnl, radius, q, threshold)); case FMT_FLOAT3: return generate(src.size(), imp_gkf_opt8<float3>(src, krnl, radius, q, threshold)); default: OZ_INVALID_FORMAT(); } } }
534bfff090b2723791f650da39b43ae5e398a68d.cu
// // by Jan Eric Kyprianidis <www.kyprianidis.com> // Copyright (C) 2010-2012 Computer Graphics Systems Group at the // Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de> // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // #include <oz/gkf_opt.h> #include <oz/generate.h> #include <oz/gpu_sampler2.h> namespace oz { template<typename T> struct imp_gkf_opt8 : public generator<T> { gpu_sampler<T,0> src_; gpu_sampler<float4,1> krnl_; float radius_; float q_; float threshold_; imp_gkf_opt8( const gpu_image& src, const gpu_image& krnl, float radius, float q, float threshold ) : src_(src), krnl_(krnl,cudaFilterModeLinear, cudaAddressModeWrap, true), radius_(radius), q_(q), threshold_(threshold) {} inline __device__ T operator()( int ix, int iy) const { T m[8]; T s[8]; float w[8]; { T c = src_(ix, iy); float wx = krnl_(0,0).x; #pragma unroll for (int k = 0; k < 8; ++k) { m[k] = c * wx; s[k] = c * c * wx; w[k] = wx; } } float piN = 2 * CUDART_PI_F / 8; float4 RpiN = make_float4(cosf(piN), sinf(piN), -sinf(piN), cosf(piN)); int r = (int)ceilf(radius_); for (int j = 0; j <= r; ++j) { for (int i = -r; i <= r; ++i) { if ((j !=0) || (i > 0)) { float2 v = make_float2( 0.5f * i / radius_, 0.5f * j / radius_); float dot_v = dot(v,v); if (dot_v <= 0.25f) { T c0 = src_(ix + i, iy +j); T c1 = src_(ix - i, iy -j); T cc0 = c0 * c0; T cc1 = c1 * c1; float4 tmp0 = krnl_(v); float4 tmp1 = krnl_(-v); float wx[8] = { tmp0.x, tmp0.y, tmp0.z, tmp0.w, tmp1.x, tmp1.y, tmp1.z, tmp1.w }; #pragma unroll for (int k = 0; k < 4; ++k) { m[k] += c0 * wx[k] + c1 * wx[k+4]; s[k] += cc0 * wx[k] + cc1 * wx[k+4]; w[k] += wx[k] + wx[k+4]; m[k+4] += c1 * wx[k] + c0 * wx[k+4]; s[k+4] += cc1 * wx[k] + cc0 * wx[k+4]; w[k+4] += wx[k] + wx[k+4]; } } } } } T o = make_zero<T>(); float ow = 0; #pragma unroll for (int k = 0; k < 8; ++k) { m[k] /= w[k]; s[k] = fabs(s[k] / w[k] - m[k] * m[k]); float sigma2 = fmaxf(threshold_, sqrtf(sum(s[k]))); float wk = __powf(sigma2, -q_); o += m[k] * wk; ow += wk; } return o / ow; } }; gpu_image gkf_opt8_filter( const gpu_image& src, const gpu_image& krnl, float radius, float q, float threshold ) { switch (src.format()) { case FMT_FLOAT: return generate(src.size(), imp_gkf_opt8<float >(src, krnl, radius, q, threshold)); case FMT_FLOAT3: return generate(src.size(), imp_gkf_opt8<float3>(src, krnl, radius, q, threshold)); default: OZ_INVALID_FORMAT(); } } }
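Apart from the hipify banner and the sampler enum names (hipFilterModeLinear/hipAddressModeWrap versus cudaFilterModeLinear/cudaAddressModeWrap), the two versions above are identical. The kernel's epilogue blends the eight sector statistics into one output: each sector k accumulates m = Σw·c, s = Σw·c², w = Σw, and low-variance sectors receive large weights sigma^(-q). A single-channel host sketch of that combination step (gkf_combine and the values in main are illustrative, not part of the oz library):

#include <cmath>
#include <cstdio>

// Scalar sketch of the per-sector epilogue of imp_gkf_opt8::operator():
// sector means are blended with weights sigma^(-q), with sigma clamped below by `threshold`.
float gkf_combine(const float m[8], const float s[8], const float w[8],
                  float q, float threshold) {
    float o = 0.0f, ow = 0.0f;
    for (int k = 0; k < 8; ++k) {
        float mean  = m[k] / w[k];
        float var   = std::fabs(s[k] / w[k] - mean * mean);   // E[c^2] - E[c]^2
        float sigma = std::fmax(threshold, std::sqrt(var));
        float wk    = std::pow(sigma, -q);                    // low-variance sectors dominate
        o  += mean * wk;
        ow += wk;
    }
    return o / ow;
}

int main() {
    float m[8], s[8], w[8];
    for (int k = 0; k < 8; ++k) { w[k] = 1.0f; m[k] = 0.5f; s[k] = 0.25f + 0.01f * k; }
    printf("combined = %f\n", gkf_combine(m, s, w, 8.0f, 1e-4f));
    return 0;
}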
664bfe6f62024ab02d898dd49328b6efc5e06cd4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

__global__ void add(int n, float *x, float *y){
  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int i = index; i < n; i+=stride)
    y[i] = x[i] + y[i];
}

int main(void){
  int N = 1<<20; // 1M elements
  std::cout << N << std::endl;

  float *x, *y;
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));

  for (int i = 0; i < N; i++){
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Run kernel: one block of 256 threads; the stride loop covers all N elements
  hipLaunchKernelGGL(( add) , dim3(1), dim3(256) , 0, 0, N,x,y);

  // wait for the GPU to finish before checking the result on the host
  hipDeviceSynchronize();

  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "max error: " << maxError << std::endl;

  hipFree(x);
  hipFree(y);
  return 0;
}
664bfe6f62024ab02d898dd49328b6efc5e06cd4.cu
#include <iostream>
#include <math.h>

__global__ void add(int n, float *x, float *y){
  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int i = index; i < n; i+=stride)
    y[i] = x[i] + y[i];
}

int main(void){
  int N = 1<<20; // 1M elements
  std::cout << N << std::endl;

  float *x, *y;
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  for (int i = 0; i < N; i++){
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Run kernel: one block of 256 threads; the stride loop covers all N elements
  add <<< 1, 256 >>> (N,x,y);

  // wait for the GPU to finish before checking the result on the host
  cudaDeviceSynchronize();

  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "max error: " << maxError << std::endl;

  cudaFree(x);
  cudaFree(y);
  return 0;
}
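This pair shows the launch-syntax translation most clearly: add<<<1, 256>>>(N, x, y) becomes hipLaunchKernelGGL((add), dim3(1), dim3(256), 0, 0, N, x, y), where the two extra zero arguments are the dynamic shared-memory size and the stream. The single-block launch is correct only because of the stride loop; a variant that fills the whole GPU strides by the grid size instead. A CUDA sketch of that variant (not part of either file above):

#include <iostream>
#include <math.h>
#include <cuda_runtime.h>

__global__ void add(int n, float *x, float *y) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;   // global thread id
    int stride = gridDim.x * blockDim.x;                 // total threads in the grid
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void) {
    int N = 1 << 20;
    float *x, *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;     // enough blocks to cover all N elements
    add<<<numBlocks, blockSize>>>(N, x, y);
    cudaDeviceSynchronize();

    float maxError = 0.0f;
    for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "max error: " << maxError << std::endl;

    cudaFree(x);
    cudaFree(y);
    return 0;
}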
fa208e5b0007e454fb8e8f00e4cbe8308efd6c71.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples #include "../include/ContAcq-IntClk.h" //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 240 #define ITERATIONS REPLACE_ITERATIONS // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // exponential function for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=exp(Value1); Value3=exp(Value2); Value1=exp(Value3); Value2=exp(Value1); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //sinusoidal functions for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=cos(Value1); Value3=sin(Value2); Value2=cos(Value1); Value1=sin(Value2); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal3(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=99999; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //square root for(unsigned long k=0; k<ITERATIONS*(blockDim.x/blockDim.x+100);k++) { Value1=Value2*Value2; Value1=sqrt(abs(Value1)); Value2=sqrt(abs(I2))*sqrt(abs(I2)); Value3=sqrt(abs(Value2)); Value2=sqrt(abs(Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernalEmpty(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; __syncthreads(); // Excessive Mod/Div 
Operations for(unsigned long k=0; k<ITERATIONS*(blockDim.x+299);k++) { Value1=(I1)+k; Value2=(I2)+k; Value3=(Value2)+k; Value2=(Value1)+k; __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } __global__ void PowerKernal4(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // logarithmic for(unsigned k=0; k<ITERATIONS*(blockDim.x+50);k++) { Value1=log2((I1)); Value2=log2((I2)); Value3=log2((Value2)); Value2=log2((Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } // Host code int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before malloc in GPU0\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); printf("before malloc in GPU1\n"); checkCudaErrors( hipMalloc((void**)&d_B, size) ); printf("before malloc in GPU2\n"); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after malloc in GPU\n"); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); //sleep(0.5); dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, 
d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal3), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); //sleep(0.5); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; hipLaunchKernelGGL(( PowerKernal4), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); hipLaunchKernelGGL(( PowerKernalEmpty), dim3(dimGrid2),dim3(dimBlock2), 0, 0, d_A, d_B, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
fa208e5b0007e454fb8e8f00e4cbe8308efd6c71.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples #include "../include/ContAcq-IntClk.h" //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 240 #define ITERATIONS REPLACE_ITERATIONS // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // exponential function for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=exp(Value1); Value3=exp(Value2); Value1=exp(Value3); Value2=exp(Value1); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal2(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=A[i]; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //sinusoidal functions for(unsigned k=0; k<ITERATIONS*(blockDim.x/blockDim.x+50);k++) { Value2=cos(Value1); Value3=sin(Value2); Value2=cos(Value1); Value1=sin(Value2); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernal3(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=99999; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; //square root for(unsigned long k=0; k<ITERATIONS*(blockDim.x/blockDim.x+100);k++) { Value1=Value2*Value2; Value1=sqrt(abs(Value1)); Value2=sqrt(abs(I2))*sqrt(abs(I2)); Value3=sqrt(abs(Value2)); Value2=sqrt(abs(Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } __global__ void PowerKernalEmpty(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; 
k<ITERATIONS*(blockDim.x+299);k++) { Value1=(I1)+k; Value2=(I2)+k; Value3=(Value2)+k; Value2=(Value1)+k; __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } __global__ void PowerKernal4(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation float Value1=0; float Value2=0; float Value3=0; float Value=0; float I1=A[i]; float I2=B[i]; // logarithmic for(unsigned k=0; k<ITERATIONS*(blockDim.x+50);k++) { Value1=log2((I1)); Value2=log2((I2)); Value3=log2((Value2)); Value2=log2((Value1)); } Value=Value3-Value2; C[i]=Value; __syncthreads(); } // Host code int main() { printf("Power Microbenchmarks\n"); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before malloc in GPU0\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); printf("before malloc in GPU1\n"); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); printf("before malloc in GPU2\n"); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after malloc in GPU\n"); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); //sleep(0.5); dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( 
cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); //sleep(0.5); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } dimGrid.y = NUM_OF_BLOCKS; for (int i=0; i<3; i++) { dimGrid.y /= 3; PowerKernal4<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A, d_B, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); } getLastCudaError("kernel launch failure"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(cutStopTimer(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); printf("execution time = %f\n", cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
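The benchmark above measures wall-clock time with the legacy cutil helpers (cutCreateTimer, CUT_SAFE_CALL) and synchronizes with the deprecated cudaThreadSynchronize. The same measurement can be made with CUDA events, which the Voronoi code further down in this collection also uses. A standalone sketch of event-based kernel timing (the busy kernel is a placeholder, not one of the PowerKernal variants):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(float *p) { p[threadIdx.x] = sqrtf(p[threadIdx.x] + 1.0f); }

int main() {
    float *d;
    cudaMalloc(&d, 256 * sizeof(float));
    cudaMemset(d, 0, 256 * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    busy<<<1, 256>>>(d);                      // work being timed
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // wait for the kernel and the stop event to complete

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed GPU time in milliseconds
    printf("kernel time = %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}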
0bb24900dd7f9c84dc407a2f8a0f892baa759d21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #if (GPUSHMEM < 200) #define NUM_THREADS 512 #else #define NUM_THREADS 1024 #endif /////////////////////////////////////////////////////////////////////////////////////////////////// // size of work for a thread block #define BLK_M 16 #define BLK_N 16 #define BLK_K (NUM_THREADS / (BLK_M * BLK_N)) /////////////////////////////////////////////////////////////////////////////////////////////////// // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce2( /*int n,*/ int j, int k, int i, magmaDoubleComplex x[][ BLK_N +1][ BLK_K +1] ) { __syncthreads(); /* if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); } */ if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magmablas_zgemm_reduce_kernel(int m, int n, int k, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ d_A, int lda, const magmaDoubleComplex * __restrict__ d_B, int ldb, magmaDoubleComplex beta, magmaDoubleComplex *d_C, int ldc) { const int i = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){ const magmaDoubleComplex *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda; const magmaDoubleComplex *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb; magmaDoubleComplex *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; __shared__ magmaDoubleComplex sum[BLK_M][BLK_N+1][ BLK_K +1]; magmaDoubleComplex lsum; /* w := v' * C */ lsum = MAGMA_Z_ZERO; for( int j = i; j < k; j += BLK_K ) lsum += MAGMA_Z_CNJG( dA[j] )* dB[j]; sum[threadIdx.y][threadIdx.z][i] = lsum; sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_Z_EQUAL(beta, MAGMA_Z_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[threadIdx.y][threadIdx.z][0]; } } } //============================================================================== extern "C" void magmablas_zgemm_reduce(magma_int_t m, magma_int_t n, magma_int_t k, magmaDoubleComplex alpha, const magmaDoubleComplex *d_A, magma_int_t lda, const magmaDoubleComplex *d_B, magma_int_t ldb, magmaDoubleComplex beta, magmaDoubleComplex *d_C, magma_int_t ldc ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= ZGEMM_REDUCE performs one of the matrix-matrix operations C := alpha* A' B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. ===================================================================== */ dim3 blocks( (m+BLK_M-1)/BLK_M, (n+BLK_N-1)/BLK_N ); dim3 threads( BLK_K, BLK_M, BLK_N ); hipLaunchKernelGGL(( magmablas_zgemm_reduce_kernel), dim3(blocks),dim3(threads), 0, magma_stream , m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); } //==============================================================================
0bb24900dd7f9c84dc407a2f8a0f892baa759d21.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #if (GPUSHMEM < 200) #define NUM_THREADS 512 #else #define NUM_THREADS 1024 #endif /////////////////////////////////////////////////////////////////////////////////////////////////// // size of work for a thread block #define BLK_M 16 #define BLK_N 16 #define BLK_K (NUM_THREADS / (BLK_M * BLK_N)) /////////////////////////////////////////////////////////////////////////////////////////////////// // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce2( /*int n,*/ int j, int k, int i, magmaDoubleComplex x[][ BLK_N +1][ BLK_K +1] ) { __syncthreads(); /* if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); } */ if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. 
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); } } // end sum_reduce //============================================================================== __global__ void magmablas_zgemm_reduce_kernel(int m, int n, int k, magmaDoubleComplex alpha, const magmaDoubleComplex * __restrict__ d_A, int lda, const magmaDoubleComplex * __restrict__ d_B, int ldb, magmaDoubleComplex beta, magmaDoubleComplex *d_C, int ldc) { const int i = threadIdx.x; if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){ const magmaDoubleComplex *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda; const magmaDoubleComplex *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb; magmaDoubleComplex *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc; __shared__ magmaDoubleComplex sum[BLK_M][BLK_N+1][ BLK_K +1]; magmaDoubleComplex lsum; /* w := v' * C */ lsum = MAGMA_Z_ZERO; for( int j = i; j < k; j += BLK_K ) lsum += MAGMA_Z_CNJG( dA[j] )* dB[j]; sum[threadIdx.y][threadIdx.z][i] = lsum; sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum ); /* C := C - v * w */ __syncthreads(); if (threadIdx.x == 0) { if (MAGMA_Z_EQUAL(beta, MAGMA_Z_ZERO)) dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0]; else dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] + alpha*sum[threadIdx.y][threadIdx.z][0]; } } } //============================================================================== extern "C" void magmablas_zgemm_reduce(magma_int_t m, magma_int_t n, magma_int_t k, magmaDoubleComplex alpha, const magmaDoubleComplex *d_A, magma_int_t lda, const magmaDoubleComplex *d_B, magma_int_t ldb, magmaDoubleComplex beta, magmaDoubleComplex *d_C, magma_int_t ldc ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= ZGEMM_REDUCE performs one of the matrix-matrix operations C := alpha* A' B + beta*C, where alpha and beta are scalars, and A, B and C are matrices, with A a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix. This routine is tuned for m, n << k. Typically, m and n are expected to be less than 128. ===================================================================== */ dim3 blocks( (m+BLK_M-1)/BLK_M, (n+BLK_N-1)/BLK_N ); dim3 threads( BLK_K, BLK_M, BLK_N ); magmablas_zgemm_reduce_kernel<<<blocks,threads, 0, magma_stream >>>( m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); } //==============================================================================
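sum_reduce2 above is a block-level tree reduction: starting at stride 32 it repeatedly folds the upper half of the active range onto the lower half, synchronizing after each step, until the per-thread partial sums end up in x[j][k][0]. A simplified one-dimensional sketch of the same pattern (block_sum, dot_partial, and BLK are illustrative names and values, not MAGMA routines):

#include <cstdio>
#include <cuda_runtime.h>

#define BLK 64   // illustrative power-of-two thread count per block

// 1-D version of the tree reduction: after the call, x[0] holds the sum of
// the BLK values written by the block's threads.
__device__ void block_sum(int i, float *x) {
    __syncthreads();
    for (int offset = BLK / 2; offset >= 1; offset /= 2) {
        if (i < offset) x[i] += x[i + offset];
        __syncthreads();
    }
}

__global__ void dot_partial(int n, const float *a, const float *b, float *out) {
    __shared__ float x[BLK];
    int i = threadIdx.x;
    float s = 0.0f;
    for (int j = i; j < n; j += BLK) s += a[j] * b[j];  // strided partial products, as in the kernel above
    x[i] = s;
    block_sum(i, x);
    if (i == 0) out[blockIdx.x] = x[0];
}

int main() {
    const int n = 1000;
    float *a, *b, *out;
    cudaMallocManaged(&a, n * sizeof(float));
    cudaMallocManaged(&b, n * sizeof(float));
    cudaMallocManaged(&out, sizeof(float));
    for (int j = 0; j < n; ++j) { a[j] = 1.0f; b[j] = 2.0f; }
    dot_partial<<<1, BLK>>>(n, a, b, out);   // must be launched with exactly BLK threads per block
    cudaDeviceSynchronize();
    printf("dot = %f (expected %f)\n", out[0], 2.0f * n);
    cudaFree(a); cudaFree(b); cudaFree(out);
    return 0;
}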
fd6309661b109b788e651f328cf07c35da3c5235.hip
// !!! This is a file automatically generated by hipify!!! #include <string> #include <vector> #include <iostream> #include <iomanip> #include <fstream> #include <cmath> #include <hip/hip_runtime.h> #include "stopwatch.h" #include "knearests.h" #include "voronoi.h" #ifdef __CUDA_ARCH__ __shared__ uchar3 tr_data[VORO_BLOCK_SIZE * _MAX_T_]; // memory pool for chained lists of triangles __shared__ uchar boundary_next_data[VORO_BLOCK_SIZE * _MAX_P_]; __shared__ float4 clip_data[VORO_BLOCK_SIZE * _MAX_P_]; // clipping planes inline __device__ uchar3& tr(int t) { return tr_data[threadIdx.x*_MAX_T_ + t]; } inline __device__ uchar& boundary_next(int v) { return boundary_next_data[threadIdx.x*_MAX_P_ + v]; } inline __device__ float4& clip(int v) { return clip_data[threadIdx.x*_MAX_P_ + v]; } #else uchar3 tr_data[_MAX_T_]; uchar boundary_next_data[_MAX_P_]; float4 clip_data[_MAX_P_]; inline uchar3& tr(int t) { return tr_data[t]; } inline uchar& boundary_next(int v) { return boundary_next_data[v]; } inline float4& clip(int v) { return clip_data[v]; } GlobalStats gs; #endif __host__ __device__ float4 point_from_ptr3(float* f) { return make_float4(f[0], f[1], f[2], 1); } __host__ __device__ float4 minus4(float4 A, float4 B) { return make_float4(A.x-B.x, A.y-B.y, A.z-B.z, A.w-B.w); } __host__ __device__ float4 plus4(float4 A, float4 B) { return make_float4(A.x+B.x, A.y+B.y, A.z+B.z, A.w+B.w); } __host__ __device__ float dot4(float4 A, float4 B) { return A.x*B.x + A.y*B.y + A.z*B.z + A.w*B.w; } __host__ __device__ float dot3(float4 A, float4 B) { return A.x*B.x + A.y*B.y + A.z*B.z; } __host__ __device__ float4 mul3(float s, float4 A) { return make_float4(s*A.x, s*A.y, s*A.z, 1.); } __host__ __device__ float4 cross3(float4 A, float4 B) { return make_float4(A.y*B.z - A.z*B.y, A.z*B.x - A.x*B.z, A.x*B.y - A.y*B.x, 0); } __host__ __device__ float4 plane_from_point_and_normal(float4 P, float4 n) { return make_float4(n.x, n.y, n.z, -dot3(P, n)); } __host__ __device__ inline float det2x2(float a11, float a12, float a21, float a22) { return a11*a22 - a12*a21; } __host__ __device__ inline float det3x3(float a11, float a12, float a13, float a21, float a22, float a23, float a31, float a32, float a33) { return a11*det2x2(a22, a23, a32, a33) - a21*det2x2(a12, a13, a32, a33) + a31*det2x2(a12, a13, a22, a23); } __host__ __device__ inline float det4x4( float a11, float a12, float a13, float a14, float a21, float a22, float a23, float a24, float a31, float a32, float a33, float a34, float a41, float a42, float a43, float a44 ) { float m12 = a21*a12 - a11*a22; float m13 = a31*a12 - a11*a32; float m14 = a41*a12 - a11*a42; float m23 = a31*a22 - a21*a32; float m24 = a41*a22 - a21*a42; float m34 = a41*a32 - a31*a42; float m123 = m23*a13 - m13*a23 + m12*a33; float m124 = m24*a13 - m14*a23 + m12*a43; float m134 = m34*a13 - m14*a33 + m13*a43; float m234 = m34*a23 - m24*a33 + m23*a43; return (m234*a14 - m134*a24 + m124*a34 - m123*a44); } __host__ __device__ inline double det2x2(double a11, double a12, double a21, double a22) { return a11*a22 - a12*a21; } __host__ __device__ inline double det3x3(double a11, double a12, double a13, double a21, double a22, double a23, double a31, double a32, double a33) { return a11*det2x2(a22, a23, a32, a33) - a21*det2x2(a12, a13, a32, a33) + a31*det2x2(a12, a13, a22, a23); } __host__ __device__ inline double det4x4( double a11, double a12, double a13, double a14, double a21, double a22, double a23, double a24, double a31, double a32, double a33, double a34, double a41, double a42, double a43, 
double a44 ) { double m12 = a21*a12 - a11*a22; double m13 = a31*a12 - a11*a32; double m14 = a41*a12 - a11*a42; double m23 = a31*a22 - a21*a32; double m24 = a41*a22 - a21*a42; double m34 = a41*a32 - a31*a42; double m123 = m23*a13 - m13*a23 + m12*a33; double m124 = m24*a13 - m14*a23 + m12*a43; double m134 = m34*a13 - m14*a33 + m13*a43; double m234 = m34*a23 - m24*a33 + m23*a43; return (m234*a14 - m134*a24 + m124*a34 - m123*a44); } __host__ __device__ inline float get_tet_volume(float4 A, float4 B, float4 C) { return -det3x3(A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z)/6.; } __host__ __device__ void get_tet_volume_and_barycenter(float4& bary, float& volume, float4 A, float4 B, float4 C, float4 D) { volume = get_tet_volume(minus4(A, D), minus4(B, D), minus4(C, D)); bary = make_float4(.25*(A.x+B.x+C.x+D.x), .25*(A.y+B.y+C.y+D.y), .25*(A.z+B.z+C.z+D.z), 1); } __host__ __device__ float4 get_plane_from_points(float4 A, float4 B, float4 C) { float4 plane = cross3(minus4(B, A), minus4(C, A)); plane.z = -dot3(plane, A); return plane; } __host__ __device__ float4 project_on_plane(float4 P, float4 plane) { float4 n = make_float4(plane.x, plane.y, plane.z, 0); float lambda = (dot4(n, P) + plane.w)/dot4(n, n); // lambda = (dot3(n, P) + plane.w) / norm23(n); return plus4(P, mul3(-lambda, n)); } template <typename T> __host__ __device__ void inline swap(T& a, T& b) { T c(a); a = b; b = c; } __host__ __device__ ConvexCell::ConvexCell(int p_seed, float* p_pts,Status *p_status) { float eps = .1f; float xmin = -eps; float ymin = -eps; float zmin = -eps; float xmax = 1000 + eps; float ymax = 1000 + eps; float zmax = 1000 + eps; pts = p_pts; first_boundary_ = END_OF_LIST; FOR(i, _MAX_P_) boundary_next(i) = END_OF_LIST; voro_id = p_seed; voro_seed = make_float4(pts[3 * voro_id], pts[3 * voro_id + 1], pts[3 * voro_id + 2], 1); status = p_status; *status = success; clip(0) = make_float4( 1.0, 0.0, 0.0, -xmin); clip(1) = make_float4(-1.0, 0.0, 0.0, xmax); clip(2) = make_float4( 0.0, 1.0, 0.0, -ymin); clip(3) = make_float4( 0.0, -1.0, 0.0, ymax); clip(4) = make_float4( 0.0, 0.0, 1.0, -zmin); clip(5) = make_float4( 0.0, 0.0, -1.0, zmax); nb_v = 6; tr(0) = make_uchar3(2, 5, 0); tr(1) = make_uchar3(5, 3, 0); tr(2) = make_uchar3(1, 5, 2); tr(3) = make_uchar3(5, 1, 3); tr(4) = make_uchar3(4, 2, 0); tr(5) = make_uchar3(4, 0, 3); tr(6) = make_uchar3(2, 4, 1); tr(7) = make_uchar3(4, 3, 1); nb_t = 8; } __host__ __device__ bool ConvexCell::is_security_radius_reached(float4 last_neig) { // finds furthest voro vertex distance2 float v_dist = 0; FOR(i, nb_t) { float4 pc = compute_triangle_point(tr(i)); float4 diff = minus4(pc, voro_seed); float d2 = dot3(diff, diff); // TODO safe to put dot4 here, diff.w = 0 v_dist = max(d2, v_dist); } //compare to new neighbors distance2 float4 diff = minus4(last_neig, voro_seed); // TODO it really should take index of the neighbor instead of the float4, then would be safe to put dot4 float d2 = dot3(diff, diff); return (d2 > 4*v_dist); } __host__ __device__ inline uchar& ConvexCell::ith_plane(uchar t, int i) { return reinterpret_cast<uchar *>(&(tr(t)))[i]; } __host__ __device__ float4 ConvexCell::compute_triangle_point(uchar3 t, bool persp_divide) const { float4 pi1 = clip(t.x); float4 pi2 = clip(t.y); float4 pi3 = clip(t.z); float4 result; result.x = -det3x3(pi1.w, pi1.y, pi1.z, pi2.w, pi2.y, pi2.z, pi3.w, pi3.y, pi3.z); result.y = -det3x3(pi1.x, pi1.w, pi1.z, pi2.x, pi2.w, pi2.z, pi3.x, pi3.w, pi3.z); result.z = -det3x3(pi1.x, pi1.y, pi1.w, pi2.x, pi2.y, pi2.w, pi3.x, pi3.y, pi3.w); result.w 
= det3x3(pi1.x, pi1.y, pi1.z, pi2.x, pi2.y, pi2.z, pi3.x, pi3.y, pi3.z); if (persp_divide) return make_float4(result.x / result.w, result.y / result.w, result.z / result.w, 1); return result; } inline __host__ __device__ float max4(float a, float b, float c, float d) { return fmaxf(fmaxf(a,b),fmaxf(c,d)); } inline __host__ __device__ void get_minmax3( float& m, float& M, float x1, float x2, float x3 ) { m = fminf(fminf(x1,x2), x3); M = fmaxf(fmaxf(x1,x2), x3); } inline __host__ __device__ double max4(double a, double b, double c, double d) { return fmax(fmax(a,b),fmax(c,d)); } inline __host__ __device__ void get_minmax3( double& m, double& M, double x1, double x2, double x3 ) { m = fmin(fmin(x1,x2), x3); M = fmax(fmax(x1,x2), x3); } __host__ __device__ bool ConvexCell::triangle_is_in_conflict_float(uchar3 t, float4 eqn) const { float4 pi1 = clip(t.x); float4 pi2 = clip(t.y); float4 pi3 = clip(t.z); float det = det4x4( pi1.x, pi2.x, pi3.x, eqn.x, pi1.y, pi2.y, pi3.y, eqn.y, pi1.z, pi2.z, pi3.z, eqn.z, pi1.w, pi2.w, pi3.w, eqn.w ); #ifdef USE_ARITHMETIC_FILTER float maxx = max4(fabsf(pi1.x), fabsf(pi2.x), fabsf(pi3.x), fabsf(eqn.x)); float maxy = max4(fabsf(pi1.y), fabsf(pi2.y), fabsf(pi3.y), fabsf(eqn.y)); float maxz = max4(fabsf(pi1.z), fabsf(pi2.z), fabsf(pi3.z), fabsf(eqn.z)); // The constant is computed by the program // in predicate_generator/ float eps = 6.6876506e-05 * maxx * maxy * maxz; float min_max; float max_max; get_minmax3(min_max, max_max, maxx, maxy, maxz); eps *= (max_max * max_max); if(fabsf(det) < eps) { *status = needs_exact_predicates; } #endif return (det > 0.0f); } __host__ __device__ bool ConvexCell::triangle_is_in_conflict_double(uchar3 t, float4 eqn_f) const { float4 pi1_f = clip(t.x); float4 pi2_f = clip(t.y); float4 pi3_f = clip(t.z); double4 eqn = make_double4(eqn_f.x, eqn_f.y, eqn_f.z, eqn_f.w); double4 pi1 = make_double4(pi1_f.x, pi1_f.y, pi1_f.z, pi1_f.w); double4 pi2 = make_double4(pi2_f.x, pi2_f.y, pi2_f.z, pi2_f.w); double4 pi3 = make_double4(pi3_f.x, pi3_f.y, pi3_f.z, pi3_f.w); double det = det4x4( pi1.x, pi2.x, pi3.x, eqn.x, pi1.y, pi2.y, pi3.y, eqn.y, pi1.z, pi2.z, pi3.z, eqn.z, pi1.w, pi2.w, pi3.w, eqn.w ); #ifdef USE_ARITHMETIC_FILTER double maxx = max4(fabs(pi1.x), fabs(pi2.x), fabs(pi3.x), fabs(eqn.x)); double maxy = max4(fabs(pi1.y), fabs(pi2.y), fabs(pi3.y), fabs(eqn.y)); double maxz = max4(fabs(pi1.z), fabs(pi2.z), fabs(pi3.z), fabs(eqn.z)); // The constant is computed by the program // in predicate_generator/ double eps = 1.2466136531027298e-13 * maxx * maxy * maxz; double min_max; double max_max; get_minmax3(min_max, max_max, maxx, maxy, maxz); eps *= (max_max * max_max); if(fabs(det) < eps) { *status = needs_exact_predicates; } #endif return (det > 0.0f); } __host__ __device__ void ConvexCell::new_triangle(uchar i, uchar j, uchar k) { if (nb_t+1 >= _MAX_T_) { *status = triangle_overflow; return; } tr(nb_t) = make_uchar3(i, j, k); nb_t++; } __host__ __device__ int ConvexCell::new_point(int vid) { if (nb_v >= _MAX_P_) { *status = vertex_overflow; return -1; } float4 B = point_from_ptr3(pts + 3 * vid); float4 dir = minus4(voro_seed, B); float4 ave2 = plus4(voro_seed, B); float dot = dot3(ave2,dir); // TODO safe to put dot4 here, dir.w = 0 clip(nb_v) = make_float4(dir.x, dir.y, dir.z, -dot / 2.f); nb_v++; return nb_v - 1; } __host__ __device__ void ConvexCell::compute_boundary() { // clean circular list of the boundary FOR(i, _MAX_P_) boundary_next(i) = END_OF_LIST; first_boundary_ = END_OF_LIST; int nb_iter = 0; uchar t = nb_t; #ifndef 
__CUDA_ARCH__ if (nb_r>20) std::cerr << "nb_t " << (int)nb_t << " nb_r " << (int)nb_r << std::endl; #endif while (nb_r>0) { if (nb_iter++>100) { *status = inconsistent_boundary; return; } bool is_in_border[3]; bool next_is_opp[3]; FOR(e, 3) is_in_border[e] = (boundary_next(ith_plane(t, e)) != END_OF_LIST); FOR(e, 3) next_is_opp[e] = (boundary_next(ith_plane(t, (e + 1) % 3)) == ith_plane(t, e)); bool new_border_is_simple = true; // check for non manifoldness FOR(e, 3) if (!next_is_opp[e] && !next_is_opp[(e + 1) % 3] && is_in_border[(e + 1) % 3]) new_border_is_simple = false; // check for more than one boundary ... or first triangle if (!next_is_opp[0] && !next_is_opp[1] && !next_is_opp[2]) { if (first_boundary_ == END_OF_LIST) { FOR(e, 3) boundary_next(ith_plane(t, e)) = ith_plane(t, (e + 1) % 3); first_boundary_ = tr(t).x; } else new_border_is_simple = false; } if (!new_border_is_simple) { t++; if (t == nb_t + nb_r) t = nb_t; continue; } // link next FOR(e, 3) if (!next_is_opp[e]) boundary_next(ith_plane(t, e)) = ith_plane(t, (e + 1) % 3); // destroy link from removed vertices FOR(e, 3) if (next_is_opp[e] && next_is_opp[(e + 1) % 3]) { if (first_boundary_ == ith_plane(t, (e + 1) % 3)) first_boundary_ = boundary_next(ith_plane(t, (e + 1) % 3)); boundary_next(ith_plane(t, (e + 1) % 3)) = END_OF_LIST; } //remove triangle from R, and restart iterating on R swap(tr(t), tr(nb_t+nb_r-1)); t = nb_t; nb_r--; } IF_CPU(gs.add_compute_boundary_iter(nb_iter);) } __host__ __device__ void ConvexCell::clip_by_plane(int cur_v) { if (*status == vertex_overflow) return; float4 eqn = clip(cur_v); nb_r = 0; int i = 0; while (i < nb_t) { // for all vertices of the cell if(triangle_is_in_conflict(tr(i), eqn)) { nb_t--; swap(tr(i), tr(nb_t)); nb_r++; } else i++; } if (nb_t < 1) { *status = empty_cell; return; } IF_CPU(gs.add_clip(nb_r);) if (*status == needs_exact_predicates) { return; } if (nb_r == 0) { // if no clips, then remove the plane equation nb_v--; return; } // Step 2: compute cavity boundary compute_boundary(); if (*status != success) return; if (first_boundary_ == END_OF_LIST) return; // Step 3: Triangulate cavity uchar cir = first_boundary_; do { new_triangle(cur_v, cir, boundary_next(cir)); #ifndef __CUDA_ARCH__ if (nb_t >= _MAX_T_) { std::cerr << "erreur grave" << std::endl; } if (cur_v >= _MAX_P_ || cir >= _MAX_P_ || boundary_next(cir) >= _MAX_P_ ) { std::cerr << "erreur grave, triangle: " << (int)cur_v << " " << (int)cir << " " << (int)boundary_next(cir) << std::endl; break; } #endif if (*status != success) return; cir = boundary_next(cir); } while (cir != first_boundary_); } __host__ __device__ void get_tet_decomposition_of_vertex(ConvexCell& cc, int t, float4* P) { float4 C = cc.voro_seed; float4 A = cc.compute_triangle_point(tr(t)); FOR(i,3) P[2*i ] = project_on_plane(C, clip(cc.ith_plane(t,i))); FOR(i, 3) P[2*i+1] = project_on_plane(A, plane_from_point_and_normal(C, cross3(minus4(P[2*i], C), minus4(P[(2*(i+1))%6], C)))); } __host__ __device__ void export_bary_and_volume(ConvexCell& cc, float* out_pts, int seed) { float4 bary_sum = make_float4(0, 0, 0, 0); float cell_vol = 0; float4 tet_bary; float tet_vol; float4 P[6]; float4 C = cc.voro_seed; FOR(t, cc.nb_t) { float4 A = cc.compute_triangle_point(tr(t)); get_tet_decomposition_of_vertex(cc, t, P); FOR(i, 6) { get_tet_volume_and_barycenter(tet_bary, tet_vol, P[i], P[(i + 1) % 6], C, A); bary_sum = plus4(bary_sum, mul3(tet_vol, tet_bary)); cell_vol += tet_vol; } } // /cc.cell_vol out_pts[4 * seed] += bary_sum.x; out_pts[4 * seed + 1] += 
bary_sum.y; out_pts[4 * seed + 2] += bary_sum.z; out_pts[4 * seed + 3] += cell_vol; } __host__ void get_voro_diagram(ConvexCell& cc, float* out_pts, int seed, std::vector<float3>& voro_points, std::string& voro_faces) { #ifndef __CUDA_ARCH__ int row = voro_points.size() + 1; FOR(i, cc.nb_t) { float4 voro_vertex = cc.compute_triangle_point(tr(i)); voro_points.push_back(make_float3(voro_vertex.x, voro_vertex.y, voro_vertex.z)); //voro_points.push_back(make_float3(voro_vertex.x*0.9+ out_pts[3*seed]*0.1, voro_vertex.y*0.9 + out_pts[3 * seed +1] *0.1, voro_vertex.z*0.9 + out_pts[3 * seed + 2] *0.1)); } std::vector<int> clipping_plane(cc.nb_v + 1, 0); FOR(t, cc.nb_t) { clipping_plane[tr(t).x]++; clipping_plane[tr(t).y]++; clipping_plane[tr(t).z]++; } std::vector<std::vector<int>> result; int ind = 0; FOR(plane, cc.nb_v) { if (clipping_plane[plane] > 0) { std::vector<int> tab_lp; std::vector<int> tab_v; FOR(tet, cc.nb_t) { if ((int)tr(tet).x == plane) { tab_v.push_back(tet); tab_lp.push_back(0); } else if ((int)tr(tet).y == plane) { tab_v.push_back(tet); tab_lp.push_back(1); } else if ((int)tr(tet).z == plane) { tab_v.push_back(tet); tab_lp.push_back(2); } } if (tab_lp.size() <= 2) { std::cout << (int)plane << std::endl; } int i = 0; int j = 0; result.push_back(std::vector<int>(0)); while (result[ind].size() < tab_lp.size()) { int ind_i = (tab_lp[i] + 1) % 3; bool temp = false; j = 0; while (temp == false) { int ind_j = (tab_lp[j] + 2) % 3; if ((int)cc.ith_plane(tab_v[i], ind_i) == (int)cc.ith_plane(tab_v[j], ind_j)) { result[ind].push_back(tab_v[i]); temp = true; i = j; } j++; } } voro_faces += "f"; FOR(i, result[ind].size()) { voro_faces += " "; voro_faces += std::to_string(row + result[ind][i] ); } voro_faces += "\n"; ind++; } } #endif } __device__ __host__ float4 points_to_plane(float4 A, float4 B, float4 C) { float4 u = minus4(B, A); float4 v = minus4(C, A); float4 plane = cross3(u, v); plane.w = -dot3(plane, A); return make_float4(plane.x, plane.y, plane.z, plane.w); } __device__ __host__ void ConvexCell::clip_tet_from_points(float4 A, float4 B, float4 C, float4 D) { clip(nb_v) = points_to_plane(A, B, C); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(A, D, B); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(A, C, D); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(C, B, D); nb_v++; clip_by_plane(nb_v - 1); } __host__ void compute_voro_cell_CPU( float * pts, int nbpts, unsigned int* neigs, Status* gpu_stat, float* out_pts, int seed, std::vector<float3>& voro_points, std::string& voro_faces, int tet, int* tet_indices, float* tet_pts ) { ConvexCell cc(seed, pts, &(gpu_stat[seed])); IF_CPU(gs.start_cell()); float4 P0 = make_float4(tet_pts[4 * tet_indices[tet * 4]], tet_pts[4 * tet_indices[tet * 4] + 1], tet_pts[4 * tet_indices[tet * 4] + 2], tet_pts[4 * tet_indices[tet * 4] + 4]); float4 P1 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 1]], tet_pts[4 * tet_indices[tet * 4 + 1] + 1], tet_pts[4 * tet_indices[tet * 4 + 1] + 2], tet_pts[4 * tet_indices[tet * 4 + 1] + 4]); float4 P2 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 2]], tet_pts[4 * tet_indices[tet * 4 + 2] + 1], tet_pts[4 * tet_indices[tet * 4 + 2] + 2], tet_pts[4 * tet_indices[tet * 4 + 2] + 4]); float4 P3 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 3]], tet_pts[4 * tet_indices[tet * 4 + 3] + 1], tet_pts[4 * tet_indices[tet * 4 + 3] + 2], tet_pts[4 * tet_indices[tet * 4 + 3] + 4]); cc.clip_tet_from_points(P0, P1, P2, P3); FOR(v, _K_) { unsigned int z = neigs[_K_ * seed + v]; int cur_v 
= cc.new_point(z); // add new plane equation cc.clip_by_plane(cur_v); if (cc.is_security_radius_reached(point_from_ptr3(pts + 3 * z))) { break; } if (gpu_stat[seed] != success) { IF_CPU(gs.end_cell()); return; } } IF_CPU(gs.end_cell()); IF_CPU(gs.nbv[cc.nb_v]++); IF_CPU(gs.nbt[cc.nb_t]++); // check security radius if (!cc.is_security_radius_reached(point_from_ptr3(pts + 3 * neigs[_K_ * (seed + 1) - 1]))) { gpu_stat[seed] = security_radius_not_reached; } if (gpu_stat[seed] == success) { export_bary_and_volume(cc, out_pts, seed); get_voro_diagram(cc, out_pts, seed, voro_points, voro_faces); } } //################### KERNEL ###################### __host__ __device__ void compute_voro_cell( float * pts, int nbpts, unsigned int* neigs, Status* gpu_stat, float* out_pts, int seed, int tet, int* tet_indices, float* tet_pts ) { ConvexCell cc(seed, pts, &(gpu_stat[seed])); //clip by tet float4 P0 = make_float4(tet_pts[4 * tet_indices[tet * 4]], tet_pts[4 * tet_indices[tet * 4] + 1], tet_pts[4 * tet_indices[tet * 4] + 2], tet_pts[4 * tet_indices[tet * 4] + 4]); float4 P1 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 1]], tet_pts[4 * tet_indices[tet * 4 + 1] + 1], tet_pts[4 * tet_indices[tet * 4 + 1] + 2], tet_pts[4 * tet_indices[tet * 4 + 1] + 4]); float4 P2 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 2]], tet_pts[4 * tet_indices[tet * 4 + 2] + 1], tet_pts[4 * tet_indices[tet * 4 + 2] + 2], tet_pts[4 * tet_indices[tet * 4 + 2] + 4]); float4 P3 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 3]], tet_pts[4 * tet_indices[tet * 4 + 3] + 1], tet_pts[4 * tet_indices[tet * 4 + 3] + 2], tet_pts[4 * tet_indices[tet * 4 + 3] + 4]); cc.clip_tet_from_points(P0, P1, P2, P3); FOR(v, _K_) { unsigned int z = neigs[_K_ * seed + v]; int cur_v = cc.new_point(z); cc.clip_by_plane(cur_v); if (cc.is_security_radius_reached(point_from_ptr3(pts + 3*z))) { break; } if (gpu_stat[seed] != success) { return; } } // check security radius if (!cc.is_security_radius_reached(point_from_ptr3(pts + 3 * neigs[_K_ * (seed+1) -1]))) { gpu_stat[seed] = security_radius_not_reached; } if (gpu_stat[seed] == success) { export_bary_and_volume(cc, out_pts, seed); } } //----------------------------------KERNEL __global__ void voro_cell_test_GPU_param(float * pts, int nbpts, int nbtets, unsigned int* neigs, Status* gpu_stat, float* out_pts, int* tet_indices, float* tet_pts) { int seed = blockIdx.x * blockDim.x + threadIdx.x; if (seed < nbpts){ for (int tet = 0; tet < nbtets; ++tet) { compute_voro_cell(pts, nbpts, neigs, gpu_stat, out_pts, seed, tet, tet_indices, tet_pts); } } } //----------------------------------WRAPPER template <class T> struct GPUBuffer { void init(T* data) { IF_VERBOSE(std::cerr << "GPU: " << size * sizeof(T)/1048576 << " Mb used" << std::endl); cpu_data = data; cuda_check(hipMalloc((void**)& gpu_data, size * sizeof(T))); cpu2gpu(); } GPUBuffer(std::vector<T>& v) {size = v.size();init(v.data());} ~GPUBuffer() { cuda_check(hipFree(gpu_data)); } void cpu2gpu() { cuda_check(hipMemcpy(gpu_data, cpu_data, size * sizeof(T), hipMemcpyHostToDevice)); } void gpu2cpu() { cuda_check(hipMemcpy(cpu_data, gpu_data, size * sizeof(T), hipMemcpyDeviceToHost)); } T* cpu_data; T* gpu_data; int size; }; char StatusStr[6][128] = { "triangle_overflow","vertex_overflow","inconsistent_boundary","security_radius_not_reached","success", "needs_exact_predicates" }; void show_status_stats(std::vector<Status> &stat) { IF_VERBOSE(std::cerr << " \n\n\n---------Summary of success/failure------------\n"); std::vector<int> nb_statuss(6, 0); FOR(i, 
stat.size()) nb_statuss[stat[i]]++; IF_VERBOSE(FOR(r, 6) std::cerr << " " << StatusStr[r] << " " << nb_statuss[r] << "\n";) std::cerr << " " << StatusStr[4] << " " << nb_statuss[4] << " / " << stat.size() << "\n"; } void cuda_check_error() { hipError_t err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed (1) (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void compute_voro_diagram_GPU( std::vector<float>& pts, std::vector<Status> &stat, std::vector<float>& bary, std::vector<float>& tet_pts, std::vector<int>& tet_indices, std::vector<int>* KNN, int nb_Lloyd_iter ) { int nbpts = pts.size() / 3; kn_problem *kn = NULL; { IF_VERBOSE(Stopwatch W("GPU KNN")); kn = kn_prepare((float3*) pts.data(), nbpts); hipMemcpy(pts.data(), kn->d_stored_points, kn->allocated_points * sizeof(float) * 3, hipMemcpyDeviceToHost); cuda_check_error(); kn_solve(kn); IF_VERBOSE(kn_print_stats(kn)); } int nbtets = tet_pts.size() / 3; GPUBuffer<float> out_pts_w(bary); GPUBuffer<int> tet_indices_w(tet_indices); GPUBuffer<float> tet_pts_w(tet_pts); GPUBuffer<Status> gpu_stat(stat); // if (nb_Lloyd_iter == 0) { IF_VERBOSE(Stopwatch W("GPU voro kernel only")); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //dim3 threads_per_block(16, 16, 1); // A 16 x 16 block threads //dim3 number_of_blocks((nbpts / threads_per_block.x) + 1, (nbtets / threads_per_block.y) + 1, 1); hipLaunchKernelGGL(( voro_cell_test_GPU_param) , dim3((nbpts / 16) + 1), dim3(16) , 0, 0, (float*)kn->d_stored_points, nbpts, nbtets, kn->d_knearests, gpu_stat.gpu_data, out_pts_w.gpu_data, tet_indices_w.gpu_data, tet_pts_w.gpu_data); cuda_check_error(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); IF_VERBOSE(std::cerr << "kn voro: " << milliseconds << " msec" << std::endl); /* // } // // Lloyd // FOR(lit,nb_Lloyd_iter){ // IF_VERBOSE(Stopwatch W("Loyd iterations")); // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // hipEventRecord(start); // voro_cell_test_GPU_param << < nbpts / VORO_BLOCK_SIZE + 1, VORO_BLOCK_SIZE >> > ((float*)kn->d_stored_points, nbpts, kn->d_knearests, gpu_stat.gpu_data, out_pts_w.gpu_data); // cuda_check_error(); // voro_cell_test_GPU_param << < nbpts / VORO_BLOCK_SIZE + 1, VORO_BLOCK_SIZE >> > (out_pts_w.gpu_data, nbpts, kn->d_knearests, gpu_stat.gpu_data, (float*)kn->d_stored_points); // cuda_check_error(); // hipEventRecord(stop); // hipEventSynchronize(stop); // float milliseconds = 0; // hipEventElapsedTime(&milliseconds, start, stop); // IF_VERBOSE(std::cerr << "kn voro: " << milliseconds << " msec" << std::endl); // } */ { IF_VERBOSE(Stopwatch W("copy data back to the cpu")); out_pts_w.gpu2cpu(); gpu_stat.gpu2cpu(); } // Read back nearest neighbor indices if KNN is specified. 
if(KNN != NULL) { KNN->resize(kn->allocated_points*_K_); cuda_check(hipMemcpy( KNN->data(), kn->d_knearests, sizeof(int) * _K_ * kn->allocated_points, hipMemcpyDeviceToHost) ); } kn_free(&kn); show_status_stats(stat); } void compute_voro_diagram_CPU( std::vector<float>& pts, std::vector<Status>& stat, std::vector<float>& bary, std::vector<float>& tet_pts, std::vector<int>& tet_indices, std::vector<int>* KNN, int nb_Lloyd_iter ) { int nbpts = pts.size() / 3; kn_problem *kn = NULL; { IF_VERBOSE(Stopwatch W("GPU KNN")); kn = kn_prepare((float3*)pts.data(), nbpts); kn_solve(kn); IF_VERBOSE(kn_print_stats(kn)); } float* nvpts = (float*)kn_get_points(kn); unsigned int* knn = kn_get_knearests(kn); IF_VERBOSE(Stopwatch W("CPU VORO KERNEL")); std::vector<float3> voro_points; std::string voro_faces; int nbtet = tet_indices.size() / 4; FOR(seed, nbpts) { FOR(tet, nbtet) { compute_voro_cell_CPU( nvpts, nbpts, knn, stat.data(), bary.data(), seed, voro_points, voro_faces, tet, tet_indices.data(), tet_pts.data() ); } } std::fstream output_file; output_file.open("voro_tet.obj", std::ios_base::out); output_file << "# voro_cell.obj" << std::endl; output_file << "#" << std::endl; output_file << std::endl; output_file << "o voro_cell" << std::endl; output_file << std::endl; FOR(i, voro_points.size()) { output_file << "v " << voro_points[i].x << " " << voro_points[i].y << " " << voro_points[i].z << std::endl; } output_file << std::endl; output_file << voro_faces << std::endl; output_file.close(); static int callid = 0; /* //FOR(i, nb_Lloyd_iter) { // gs.reset(); // FOR(seed, nbpts) compute_voro_cell(nvpts, nbpts, knn, stat.data(), bary.data(), seed); // FOR(i, pts.size()) pts[i] = bary[i]; //} */ callid++; if (KNN != NULL) { FOR(i, _K_ * kn->allocated_points) { KNN->push_back(knn[i]); } } kn_free(&kn); free(nvpts); free(knn); show_status_stats(stat); }
fd6309661b109b788e651f328cf07c35da3c5235.cu
#include <string> #include <vector> #include <iostream> #include <iomanip> #include <fstream> #include <cmath> #include <cuda_runtime.h> #include "stopwatch.h" #include "knearests.h" #include "voronoi.h" #ifdef __CUDA_ARCH__ __shared__ uchar3 tr_data[VORO_BLOCK_SIZE * _MAX_T_]; // memory pool for chained lists of triangles __shared__ uchar boundary_next_data[VORO_BLOCK_SIZE * _MAX_P_]; __shared__ float4 clip_data[VORO_BLOCK_SIZE * _MAX_P_]; // clipping planes inline __device__ uchar3& tr(int t) { return tr_data[threadIdx.x*_MAX_T_ + t]; } inline __device__ uchar& boundary_next(int v) { return boundary_next_data[threadIdx.x*_MAX_P_ + v]; } inline __device__ float4& clip(int v) { return clip_data[threadIdx.x*_MAX_P_ + v]; } #else uchar3 tr_data[_MAX_T_]; uchar boundary_next_data[_MAX_P_]; float4 clip_data[_MAX_P_]; inline uchar3& tr(int t) { return tr_data[t]; } inline uchar& boundary_next(int v) { return boundary_next_data[v]; } inline float4& clip(int v) { return clip_data[v]; } GlobalStats gs; #endif __host__ __device__ float4 point_from_ptr3(float* f) { return make_float4(f[0], f[1], f[2], 1); } __host__ __device__ float4 minus4(float4 A, float4 B) { return make_float4(A.x-B.x, A.y-B.y, A.z-B.z, A.w-B.w); } __host__ __device__ float4 plus4(float4 A, float4 B) { return make_float4(A.x+B.x, A.y+B.y, A.z+B.z, A.w+B.w); } __host__ __device__ float dot4(float4 A, float4 B) { return A.x*B.x + A.y*B.y + A.z*B.z + A.w*B.w; } __host__ __device__ float dot3(float4 A, float4 B) { return A.x*B.x + A.y*B.y + A.z*B.z; } __host__ __device__ float4 mul3(float s, float4 A) { return make_float4(s*A.x, s*A.y, s*A.z, 1.); } __host__ __device__ float4 cross3(float4 A, float4 B) { return make_float4(A.y*B.z - A.z*B.y, A.z*B.x - A.x*B.z, A.x*B.y - A.y*B.x, 0); } __host__ __device__ float4 plane_from_point_and_normal(float4 P, float4 n) { return make_float4(n.x, n.y, n.z, -dot3(P, n)); } __host__ __device__ inline float det2x2(float a11, float a12, float a21, float a22) { return a11*a22 - a12*a21; } __host__ __device__ inline float det3x3(float a11, float a12, float a13, float a21, float a22, float a23, float a31, float a32, float a33) { return a11*det2x2(a22, a23, a32, a33) - a21*det2x2(a12, a13, a32, a33) + a31*det2x2(a12, a13, a22, a23); } __host__ __device__ inline float det4x4( float a11, float a12, float a13, float a14, float a21, float a22, float a23, float a24, float a31, float a32, float a33, float a34, float a41, float a42, float a43, float a44 ) { float m12 = a21*a12 - a11*a22; float m13 = a31*a12 - a11*a32; float m14 = a41*a12 - a11*a42; float m23 = a31*a22 - a21*a32; float m24 = a41*a22 - a21*a42; float m34 = a41*a32 - a31*a42; float m123 = m23*a13 - m13*a23 + m12*a33; float m124 = m24*a13 - m14*a23 + m12*a43; float m134 = m34*a13 - m14*a33 + m13*a43; float m234 = m34*a23 - m24*a33 + m23*a43; return (m234*a14 - m134*a24 + m124*a34 - m123*a44); } __host__ __device__ inline double det2x2(double a11, double a12, double a21, double a22) { return a11*a22 - a12*a21; } __host__ __device__ inline double det3x3(double a11, double a12, double a13, double a21, double a22, double a23, double a31, double a32, double a33) { return a11*det2x2(a22, a23, a32, a33) - a21*det2x2(a12, a13, a32, a33) + a31*det2x2(a12, a13, a22, a23); } __host__ __device__ inline double det4x4( double a11, double a12, double a13, double a14, double a21, double a22, double a23, double a24, double a31, double a32, double a33, double a34, double a41, double a42, double a43, double a44 ) { double m12 = a21*a12 - a11*a22; double m13 = 
a31*a12 - a11*a32; double m14 = a41*a12 - a11*a42; double m23 = a31*a22 - a21*a32; double m24 = a41*a22 - a21*a42; double m34 = a41*a32 - a31*a42; double m123 = m23*a13 - m13*a23 + m12*a33; double m124 = m24*a13 - m14*a23 + m12*a43; double m134 = m34*a13 - m14*a33 + m13*a43; double m234 = m34*a23 - m24*a33 + m23*a43; return (m234*a14 - m134*a24 + m124*a34 - m123*a44); } __host__ __device__ inline float get_tet_volume(float4 A, float4 B, float4 C) { return -det3x3(A.x, A.y, A.z, B.x, B.y, B.z, C.x, C.y, C.z)/6.; } __host__ __device__ void get_tet_volume_and_barycenter(float4& bary, float& volume, float4 A, float4 B, float4 C, float4 D) { volume = get_tet_volume(minus4(A, D), minus4(B, D), minus4(C, D)); bary = make_float4(.25*(A.x+B.x+C.x+D.x), .25*(A.y+B.y+C.y+D.y), .25*(A.z+B.z+C.z+D.z), 1); } __host__ __device__ float4 get_plane_from_points(float4 A, float4 B, float4 C) { float4 plane = cross3(minus4(B, A), minus4(C, A)); plane.z = -dot3(plane, A); return plane; } __host__ __device__ float4 project_on_plane(float4 P, float4 plane) { float4 n = make_float4(plane.x, plane.y, plane.z, 0); float lambda = (dot4(n, P) + plane.w)/dot4(n, n); // lambda = (dot3(n, P) + plane.w) / norm23(n); return plus4(P, mul3(-lambda, n)); } template <typename T> __host__ __device__ void inline swap(T& a, T& b) { T c(a); a = b; b = c; } __host__ __device__ ConvexCell::ConvexCell(int p_seed, float* p_pts,Status *p_status) { float eps = .1f; float xmin = -eps; float ymin = -eps; float zmin = -eps; float xmax = 1000 + eps; float ymax = 1000 + eps; float zmax = 1000 + eps; pts = p_pts; first_boundary_ = END_OF_LIST; FOR(i, _MAX_P_) boundary_next(i) = END_OF_LIST; voro_id = p_seed; voro_seed = make_float4(pts[3 * voro_id], pts[3 * voro_id + 1], pts[3 * voro_id + 2], 1); status = p_status; *status = success; clip(0) = make_float4( 1.0, 0.0, 0.0, -xmin); clip(1) = make_float4(-1.0, 0.0, 0.0, xmax); clip(2) = make_float4( 0.0, 1.0, 0.0, -ymin); clip(3) = make_float4( 0.0, -1.0, 0.0, ymax); clip(4) = make_float4( 0.0, 0.0, 1.0, -zmin); clip(5) = make_float4( 0.0, 0.0, -1.0, zmax); nb_v = 6; tr(0) = make_uchar3(2, 5, 0); tr(1) = make_uchar3(5, 3, 0); tr(2) = make_uchar3(1, 5, 2); tr(3) = make_uchar3(5, 1, 3); tr(4) = make_uchar3(4, 2, 0); tr(5) = make_uchar3(4, 0, 3); tr(6) = make_uchar3(2, 4, 1); tr(7) = make_uchar3(4, 3, 1); nb_t = 8; } __host__ __device__ bool ConvexCell::is_security_radius_reached(float4 last_neig) { // finds furthest voro vertex distance2 float v_dist = 0; FOR(i, nb_t) { float4 pc = compute_triangle_point(tr(i)); float4 diff = minus4(pc, voro_seed); float d2 = dot3(diff, diff); // TODO safe to put dot4 here, diff.w = 0 v_dist = max(d2, v_dist); } //compare to new neighbors distance2 float4 diff = minus4(last_neig, voro_seed); // TODO it really should take index of the neighbor instead of the float4, then would be safe to put dot4 float d2 = dot3(diff, diff); return (d2 > 4*v_dist); } __host__ __device__ inline uchar& ConvexCell::ith_plane(uchar t, int i) { return reinterpret_cast<uchar *>(&(tr(t)))[i]; } __host__ __device__ float4 ConvexCell::compute_triangle_point(uchar3 t, bool persp_divide) const { float4 pi1 = clip(t.x); float4 pi2 = clip(t.y); float4 pi3 = clip(t.z); float4 result; result.x = -det3x3(pi1.w, pi1.y, pi1.z, pi2.w, pi2.y, pi2.z, pi3.w, pi3.y, pi3.z); result.y = -det3x3(pi1.x, pi1.w, pi1.z, pi2.x, pi2.w, pi2.z, pi3.x, pi3.w, pi3.z); result.z = -det3x3(pi1.x, pi1.y, pi1.w, pi2.x, pi2.y, pi2.w, pi3.x, pi3.y, pi3.w); result.w = det3x3(pi1.x, pi1.y, pi1.z, pi2.x, pi2.y, pi2.z, pi3.x, 
pi3.y, pi3.z); if (persp_divide) return make_float4(result.x / result.w, result.y / result.w, result.z / result.w, 1); return result; } inline __host__ __device__ float max4(float a, float b, float c, float d) { return fmaxf(fmaxf(a,b),fmaxf(c,d)); } inline __host__ __device__ void get_minmax3( float& m, float& M, float x1, float x2, float x3 ) { m = fminf(fminf(x1,x2), x3); M = fmaxf(fmaxf(x1,x2), x3); } inline __host__ __device__ double max4(double a, double b, double c, double d) { return fmax(fmax(a,b),fmax(c,d)); } inline __host__ __device__ void get_minmax3( double& m, double& M, double x1, double x2, double x3 ) { m = fmin(fmin(x1,x2), x3); M = fmax(fmax(x1,x2), x3); } __host__ __device__ bool ConvexCell::triangle_is_in_conflict_float(uchar3 t, float4 eqn) const { float4 pi1 = clip(t.x); float4 pi2 = clip(t.y); float4 pi3 = clip(t.z); float det = det4x4( pi1.x, pi2.x, pi3.x, eqn.x, pi1.y, pi2.y, pi3.y, eqn.y, pi1.z, pi2.z, pi3.z, eqn.z, pi1.w, pi2.w, pi3.w, eqn.w ); #ifdef USE_ARITHMETIC_FILTER float maxx = max4(fabsf(pi1.x), fabsf(pi2.x), fabsf(pi3.x), fabsf(eqn.x)); float maxy = max4(fabsf(pi1.y), fabsf(pi2.y), fabsf(pi3.y), fabsf(eqn.y)); float maxz = max4(fabsf(pi1.z), fabsf(pi2.z), fabsf(pi3.z), fabsf(eqn.z)); // The constant is computed by the program // in predicate_generator/ float eps = 6.6876506e-05 * maxx * maxy * maxz; float min_max; float max_max; get_minmax3(min_max, max_max, maxx, maxy, maxz); eps *= (max_max * max_max); if(fabsf(det) < eps) { *status = needs_exact_predicates; } #endif return (det > 0.0f); } __host__ __device__ bool ConvexCell::triangle_is_in_conflict_double(uchar3 t, float4 eqn_f) const { float4 pi1_f = clip(t.x); float4 pi2_f = clip(t.y); float4 pi3_f = clip(t.z); double4 eqn = make_double4(eqn_f.x, eqn_f.y, eqn_f.z, eqn_f.w); double4 pi1 = make_double4(pi1_f.x, pi1_f.y, pi1_f.z, pi1_f.w); double4 pi2 = make_double4(pi2_f.x, pi2_f.y, pi2_f.z, pi2_f.w); double4 pi3 = make_double4(pi3_f.x, pi3_f.y, pi3_f.z, pi3_f.w); double det = det4x4( pi1.x, pi2.x, pi3.x, eqn.x, pi1.y, pi2.y, pi3.y, eqn.y, pi1.z, pi2.z, pi3.z, eqn.z, pi1.w, pi2.w, pi3.w, eqn.w ); #ifdef USE_ARITHMETIC_FILTER double maxx = max4(fabs(pi1.x), fabs(pi2.x), fabs(pi3.x), fabs(eqn.x)); double maxy = max4(fabs(pi1.y), fabs(pi2.y), fabs(pi3.y), fabs(eqn.y)); double maxz = max4(fabs(pi1.z), fabs(pi2.z), fabs(pi3.z), fabs(eqn.z)); // The constant is computed by the program // in predicate_generator/ double eps = 1.2466136531027298e-13 * maxx * maxy * maxz; double min_max; double max_max; get_minmax3(min_max, max_max, maxx, maxy, maxz); eps *= (max_max * max_max); if(fabs(det) < eps) { *status = needs_exact_predicates; } #endif return (det > 0.0f); } __host__ __device__ void ConvexCell::new_triangle(uchar i, uchar j, uchar k) { if (nb_t+1 >= _MAX_T_) { *status = triangle_overflow; return; } tr(nb_t) = make_uchar3(i, j, k); nb_t++; } __host__ __device__ int ConvexCell::new_point(int vid) { if (nb_v >= _MAX_P_) { *status = vertex_overflow; return -1; } float4 B = point_from_ptr3(pts + 3 * vid); float4 dir = minus4(voro_seed, B); float4 ave2 = plus4(voro_seed, B); float dot = dot3(ave2,dir); // TODO safe to put dot4 here, dir.w = 0 clip(nb_v) = make_float4(dir.x, dir.y, dir.z, -dot / 2.f); nb_v++; return nb_v - 1; } __host__ __device__ void ConvexCell::compute_boundary() { // clean circular list of the boundary FOR(i, _MAX_P_) boundary_next(i) = END_OF_LIST; first_boundary_ = END_OF_LIST; int nb_iter = 0; uchar t = nb_t; #ifndef __CUDA_ARCH__ if (nb_r>20) std::cerr << "nb_t " << (int)nb_t << " 
nb_r " << (int)nb_r << std::endl; #endif while (nb_r>0) { if (nb_iter++>100) { *status = inconsistent_boundary; return; } bool is_in_border[3]; bool next_is_opp[3]; FOR(e, 3) is_in_border[e] = (boundary_next(ith_plane(t, e)) != END_OF_LIST); FOR(e, 3) next_is_opp[e] = (boundary_next(ith_plane(t, (e + 1) % 3)) == ith_plane(t, e)); bool new_border_is_simple = true; // check for non manifoldness FOR(e, 3) if (!next_is_opp[e] && !next_is_opp[(e + 1) % 3] && is_in_border[(e + 1) % 3]) new_border_is_simple = false; // check for more than one boundary ... or first triangle if (!next_is_opp[0] && !next_is_opp[1] && !next_is_opp[2]) { if (first_boundary_ == END_OF_LIST) { FOR(e, 3) boundary_next(ith_plane(t, e)) = ith_plane(t, (e + 1) % 3); first_boundary_ = tr(t).x; } else new_border_is_simple = false; } if (!new_border_is_simple) { t++; if (t == nb_t + nb_r) t = nb_t; continue; } // link next FOR(e, 3) if (!next_is_opp[e]) boundary_next(ith_plane(t, e)) = ith_plane(t, (e + 1) % 3); // destroy link from removed vertices FOR(e, 3) if (next_is_opp[e] && next_is_opp[(e + 1) % 3]) { if (first_boundary_ == ith_plane(t, (e + 1) % 3)) first_boundary_ = boundary_next(ith_plane(t, (e + 1) % 3)); boundary_next(ith_plane(t, (e + 1) % 3)) = END_OF_LIST; } //remove triangle from R, and restart iterating on R swap(tr(t), tr(nb_t+nb_r-1)); t = nb_t; nb_r--; } IF_CPU(gs.add_compute_boundary_iter(nb_iter);) } __host__ __device__ void ConvexCell::clip_by_plane(int cur_v) { if (*status == vertex_overflow) return; float4 eqn = clip(cur_v); nb_r = 0; int i = 0; while (i < nb_t) { // for all vertices of the cell if(triangle_is_in_conflict(tr(i), eqn)) { nb_t--; swap(tr(i), tr(nb_t)); nb_r++; } else i++; } if (nb_t < 1) { *status = empty_cell; return; } IF_CPU(gs.add_clip(nb_r);) if (*status == needs_exact_predicates) { return; } if (nb_r == 0) { // if no clips, then remove the plane equation nb_v--; return; } // Step 2: compute cavity boundary compute_boundary(); if (*status != success) return; if (first_boundary_ == END_OF_LIST) return; // Step 3: Triangulate cavity uchar cir = first_boundary_; do { new_triangle(cur_v, cir, boundary_next(cir)); #ifndef __CUDA_ARCH__ if (nb_t >= _MAX_T_) { std::cerr << "erreur grave" << std::endl; } if (cur_v >= _MAX_P_ || cir >= _MAX_P_ || boundary_next(cir) >= _MAX_P_ ) { std::cerr << "erreur grave, triangle: " << (int)cur_v << " " << (int)cir << " " << (int)boundary_next(cir) << std::endl; break; } #endif if (*status != success) return; cir = boundary_next(cir); } while (cir != first_boundary_); } __host__ __device__ void get_tet_decomposition_of_vertex(ConvexCell& cc, int t, float4* P) { float4 C = cc.voro_seed; float4 A = cc.compute_triangle_point(tr(t)); FOR(i,3) P[2*i ] = project_on_plane(C, clip(cc.ith_plane(t,i))); FOR(i, 3) P[2*i+1] = project_on_plane(A, plane_from_point_and_normal(C, cross3(minus4(P[2*i], C), minus4(P[(2*(i+1))%6], C)))); } __host__ __device__ void export_bary_and_volume(ConvexCell& cc, float* out_pts, int seed) { float4 bary_sum = make_float4(0, 0, 0, 0); float cell_vol = 0; float4 tet_bary; float tet_vol; float4 P[6]; float4 C = cc.voro_seed; FOR(t, cc.nb_t) { float4 A = cc.compute_triangle_point(tr(t)); get_tet_decomposition_of_vertex(cc, t, P); FOR(i, 6) { get_tet_volume_and_barycenter(tet_bary, tet_vol, P[i], P[(i + 1) % 6], C, A); bary_sum = plus4(bary_sum, mul3(tet_vol, tet_bary)); cell_vol += tet_vol; } } // /cc.cell_vol out_pts[4 * seed] += bary_sum.x; out_pts[4 * seed + 1] += bary_sum.y; out_pts[4 * seed + 2] += bary_sum.z; out_pts[4 * seed + 3] += 
cell_vol; } __host__ void get_voro_diagram(ConvexCell& cc, float* out_pts, int seed, std::vector<float3>& voro_points, std::string& voro_faces) { #ifndef __CUDA_ARCH__ int row = voro_points.size() + 1; FOR(i, cc.nb_t) { float4 voro_vertex = cc.compute_triangle_point(tr(i)); voro_points.push_back(make_float3(voro_vertex.x, voro_vertex.y, voro_vertex.z)); //voro_points.push_back(make_float3(voro_vertex.x*0.9+ out_pts[3*seed]*0.1, voro_vertex.y*0.9 + out_pts[3 * seed +1] *0.1, voro_vertex.z*0.9 + out_pts[3 * seed + 2] *0.1)); } std::vector<int> clipping_plane(cc.nb_v + 1, 0); FOR(t, cc.nb_t) { clipping_plane[tr(t).x]++; clipping_plane[tr(t).y]++; clipping_plane[tr(t).z]++; } std::vector<std::vector<int>> result; int ind = 0; FOR(plane, cc.nb_v) { if (clipping_plane[plane] > 0) { std::vector<int> tab_lp; std::vector<int> tab_v; FOR(tet, cc.nb_t) { if ((int)tr(tet).x == plane) { tab_v.push_back(tet); tab_lp.push_back(0); } else if ((int)tr(tet).y == plane) { tab_v.push_back(tet); tab_lp.push_back(1); } else if ((int)tr(tet).z == plane) { tab_v.push_back(tet); tab_lp.push_back(2); } } if (tab_lp.size() <= 2) { std::cout << (int)plane << std::endl; } int i = 0; int j = 0; result.push_back(std::vector<int>(0)); while (result[ind].size() < tab_lp.size()) { int ind_i = (tab_lp[i] + 1) % 3; bool temp = false; j = 0; while (temp == false) { int ind_j = (tab_lp[j] + 2) % 3; if ((int)cc.ith_plane(tab_v[i], ind_i) == (int)cc.ith_plane(tab_v[j], ind_j)) { result[ind].push_back(tab_v[i]); temp = true; i = j; } j++; } } voro_faces += "f"; FOR(i, result[ind].size()) { voro_faces += " "; voro_faces += std::to_string(row + result[ind][i] ); } voro_faces += "\n"; ind++; } } #endif } __device__ __host__ float4 points_to_plane(float4 A, float4 B, float4 C) { float4 u = minus4(B, A); float4 v = minus4(C, A); float4 plane = cross3(u, v); plane.w = -dot3(plane, A); return make_float4(plane.x, plane.y, plane.z, plane.w); } __device__ __host__ void ConvexCell::clip_tet_from_points(float4 A, float4 B, float4 C, float4 D) { clip(nb_v) = points_to_plane(A, B, C); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(A, D, B); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(A, C, D); nb_v++; clip_by_plane(nb_v - 1); clip(nb_v) = points_to_plane(C, B, D); nb_v++; clip_by_plane(nb_v - 1); } __host__ void compute_voro_cell_CPU( float * pts, int nbpts, unsigned int* neigs, Status* gpu_stat, float* out_pts, int seed, std::vector<float3>& voro_points, std::string& voro_faces, int tet, int* tet_indices, float* tet_pts ) { ConvexCell cc(seed, pts, &(gpu_stat[seed])); IF_CPU(gs.start_cell()); float4 P0 = make_float4(tet_pts[4 * tet_indices[tet * 4]], tet_pts[4 * tet_indices[tet * 4] + 1], tet_pts[4 * tet_indices[tet * 4] + 2], tet_pts[4 * tet_indices[tet * 4] + 4]); float4 P1 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 1]], tet_pts[4 * tet_indices[tet * 4 + 1] + 1], tet_pts[4 * tet_indices[tet * 4 + 1] + 2], tet_pts[4 * tet_indices[tet * 4 + 1] + 4]); float4 P2 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 2]], tet_pts[4 * tet_indices[tet * 4 + 2] + 1], tet_pts[4 * tet_indices[tet * 4 + 2] + 2], tet_pts[4 * tet_indices[tet * 4 + 2] + 4]); float4 P3 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 3]], tet_pts[4 * tet_indices[tet * 4 + 3] + 1], tet_pts[4 * tet_indices[tet * 4 + 3] + 2], tet_pts[4 * tet_indices[tet * 4 + 3] + 4]); cc.clip_tet_from_points(P0, P1, P2, P3); FOR(v, _K_) { unsigned int z = neigs[_K_ * seed + v]; int cur_v = cc.new_point(z); // add new plane equation cc.clip_by_plane(cur_v); if 
(cc.is_security_radius_reached(point_from_ptr3(pts + 3 * z))) { break; } if (gpu_stat[seed] != success) { IF_CPU(gs.end_cell()); return; } } IF_CPU(gs.end_cell()); IF_CPU(gs.nbv[cc.nb_v]++); IF_CPU(gs.nbt[cc.nb_t]++); // check security radius if (!cc.is_security_radius_reached(point_from_ptr3(pts + 3 * neigs[_K_ * (seed + 1) - 1]))) { gpu_stat[seed] = security_radius_not_reached; } if (gpu_stat[seed] == success) { export_bary_and_volume(cc, out_pts, seed); get_voro_diagram(cc, out_pts, seed, voro_points, voro_faces); } } //################### KERNEL ###################### __host__ __device__ void compute_voro_cell( float * pts, int nbpts, unsigned int* neigs, Status* gpu_stat, float* out_pts, int seed, int tet, int* tet_indices, float* tet_pts ) { ConvexCell cc(seed, pts, &(gpu_stat[seed])); //clip by tet float4 P0 = make_float4(tet_pts[4 * tet_indices[tet * 4]], tet_pts[4 * tet_indices[tet * 4] + 1], tet_pts[4 * tet_indices[tet * 4] + 2], tet_pts[4 * tet_indices[tet * 4] + 4]); float4 P1 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 1]], tet_pts[4 * tet_indices[tet * 4 + 1] + 1], tet_pts[4 * tet_indices[tet * 4 + 1] + 2], tet_pts[4 * tet_indices[tet * 4 + 1] + 4]); float4 P2 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 2]], tet_pts[4 * tet_indices[tet * 4 + 2] + 1], tet_pts[4 * tet_indices[tet * 4 + 2] + 2], tet_pts[4 * tet_indices[tet * 4 + 2] + 4]); float4 P3 = make_float4(tet_pts[4 * tet_indices[tet * 4 + 3]], tet_pts[4 * tet_indices[tet * 4 + 3] + 1], tet_pts[4 * tet_indices[tet * 4 + 3] + 2], tet_pts[4 * tet_indices[tet * 4 + 3] + 4]); cc.clip_tet_from_points(P0, P1, P2, P3); FOR(v, _K_) { unsigned int z = neigs[_K_ * seed + v]; int cur_v = cc.new_point(z); cc.clip_by_plane(cur_v); if (cc.is_security_radius_reached(point_from_ptr3(pts + 3*z))) { break; } if (gpu_stat[seed] != success) { return; } } // check security radius if (!cc.is_security_radius_reached(point_from_ptr3(pts + 3 * neigs[_K_ * (seed+1) -1]))) { gpu_stat[seed] = security_radius_not_reached; } if (gpu_stat[seed] == success) { export_bary_and_volume(cc, out_pts, seed); } } //----------------------------------KERNEL __global__ void voro_cell_test_GPU_param(float * pts, int nbpts, int nbtets, unsigned int* neigs, Status* gpu_stat, float* out_pts, int* tet_indices, float* tet_pts) { int seed = blockIdx.x * blockDim.x + threadIdx.x; if (seed < nbpts){ for (int tet = 0; tet < nbtets; ++tet) { compute_voro_cell(pts, nbpts, neigs, gpu_stat, out_pts, seed, tet, tet_indices, tet_pts); } } } //----------------------------------WRAPPER template <class T> struct GPUBuffer { void init(T* data) { IF_VERBOSE(std::cerr << "GPU: " << size * sizeof(T)/1048576 << " Mb used" << std::endl); cpu_data = data; cuda_check(cudaMalloc((void**)& gpu_data, size * sizeof(T))); cpu2gpu(); } GPUBuffer(std::vector<T>& v) {size = v.size();init(v.data());} ~GPUBuffer() { cuda_check(cudaFree(gpu_data)); } void cpu2gpu() { cuda_check(cudaMemcpy(gpu_data, cpu_data, size * sizeof(T), cudaMemcpyHostToDevice)); } void gpu2cpu() { cuda_check(cudaMemcpy(cpu_data, gpu_data, size * sizeof(T), cudaMemcpyDeviceToHost)); } T* cpu_data; T* gpu_data; int size; }; char StatusStr[6][128] = { "triangle_overflow","vertex_overflow","inconsistent_boundary","security_radius_not_reached","success", "needs_exact_predicates" }; void show_status_stats(std::vector<Status> &stat) { IF_VERBOSE(std::cerr << " \n\n\n---------Summary of success/failure------------\n"); std::vector<int> nb_statuss(6, 0); FOR(i, stat.size()) nb_statuss[stat[i]]++; IF_VERBOSE(FOR(r, 6) std::cerr << 
" " << StatusStr[r] << " " << nb_statuss[r] << "\n";) std::cerr << " " << StatusStr[4] << " " << nb_statuss[4] << " / " << stat.size() << "\n"; } void cuda_check_error() { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed (1) (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void compute_voro_diagram_GPU( std::vector<float>& pts, std::vector<Status> &stat, std::vector<float>& bary, std::vector<float>& tet_pts, std::vector<int>& tet_indices, std::vector<int>* KNN, int nb_Lloyd_iter ) { int nbpts = pts.size() / 3; kn_problem *kn = NULL; { IF_VERBOSE(Stopwatch W("GPU KNN")); kn = kn_prepare((float3*) pts.data(), nbpts); cudaMemcpy(pts.data(), kn->d_stored_points, kn->allocated_points * sizeof(float) * 3, cudaMemcpyDeviceToHost); cuda_check_error(); kn_solve(kn); IF_VERBOSE(kn_print_stats(kn)); } int nbtets = tet_pts.size() / 3; GPUBuffer<float> out_pts_w(bary); GPUBuffer<int> tet_indices_w(tet_indices); GPUBuffer<float> tet_pts_w(tet_pts); GPUBuffer<Status> gpu_stat(stat); // if (nb_Lloyd_iter == 0) { IF_VERBOSE(Stopwatch W("GPU voro kernel only")); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //dim3 threads_per_block(16, 16, 1); // A 16 x 16 block threads //dim3 number_of_blocks((nbpts / threads_per_block.x) + 1, (nbtets / threads_per_block.y) + 1, 1); voro_cell_test_GPU_param <<< (nbpts / 16) + 1, 16 >>> ((float*)kn->d_stored_points, nbpts, nbtets, kn->d_knearests, gpu_stat.gpu_data, out_pts_w.gpu_data, tet_indices_w.gpu_data, tet_pts_w.gpu_data); cuda_check_error(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); IF_VERBOSE(std::cerr << "kn voro: " << milliseconds << " msec" << std::endl); /* // } // // Lloyd // FOR(lit,nb_Lloyd_iter){ // IF_VERBOSE(Stopwatch W("Loyd iterations")); // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // cudaEventRecord(start); // voro_cell_test_GPU_param << < nbpts / VORO_BLOCK_SIZE + 1, VORO_BLOCK_SIZE >> > ((float*)kn->d_stored_points, nbpts, kn->d_knearests, gpu_stat.gpu_data, out_pts_w.gpu_data); // cuda_check_error(); // voro_cell_test_GPU_param << < nbpts / VORO_BLOCK_SIZE + 1, VORO_BLOCK_SIZE >> > (out_pts_w.gpu_data, nbpts, kn->d_knearests, gpu_stat.gpu_data, (float*)kn->d_stored_points); // cuda_check_error(); // cudaEventRecord(stop); // cudaEventSynchronize(stop); // float milliseconds = 0; // cudaEventElapsedTime(&milliseconds, start, stop); // IF_VERBOSE(std::cerr << "kn voro: " << milliseconds << " msec" << std::endl); // } */ { IF_VERBOSE(Stopwatch W("copy data back to the cpu")); out_pts_w.gpu2cpu(); gpu_stat.gpu2cpu(); } // Read back nearest neighbor indices if KNN is specified. 
if(KNN != NULL) { KNN->resize(kn->allocated_points*_K_); cuda_check(cudaMemcpy( KNN->data(), kn->d_knearests, sizeof(int) * _K_ * kn->allocated_points, cudaMemcpyDeviceToHost) ); } kn_free(&kn); show_status_stats(stat); } void compute_voro_diagram_CPU( std::vector<float>& pts, std::vector<Status>& stat, std::vector<float>& bary, std::vector<float>& tet_pts, std::vector<int>& tet_indices, std::vector<int>* KNN, int nb_Lloyd_iter ) { int nbpts = pts.size() / 3; kn_problem *kn = NULL; { IF_VERBOSE(Stopwatch W("GPU KNN")); kn = kn_prepare((float3*)pts.data(), nbpts); kn_solve(kn); IF_VERBOSE(kn_print_stats(kn)); } float* nvpts = (float*)kn_get_points(kn); unsigned int* knn = kn_get_knearests(kn); IF_VERBOSE(Stopwatch W("CPU VORO KERNEL")); std::vector<float3> voro_points; std::string voro_faces; int nbtet = tet_indices.size() / 4; FOR(seed, nbpts) { FOR(tet, nbtet) { compute_voro_cell_CPU( nvpts, nbpts, knn, stat.data(), bary.data(), seed, voro_points, voro_faces, tet, tet_indices.data(), tet_pts.data() ); } } std::fstream output_file; output_file.open("voro_tet.obj", std::ios_base::out); output_file << "# voro_cell.obj" << std::endl; output_file << "#" << std::endl; output_file << std::endl; output_file << "o voro_cell" << std::endl; output_file << std::endl; FOR(i, voro_points.size()) { output_file << "v " << voro_points[i].x << " " << voro_points[i].y << " " << voro_points[i].z << std::endl; } output_file << std::endl; output_file << voro_faces << std::endl; output_file.close(); static int callid = 0; /* //FOR(i, nb_Lloyd_iter) { // gs.reset(); // FOR(seed, nbpts) compute_voro_cell(nvpts, nbpts, knn, stat.data(), bary.data(), seed); // FOR(i, pts.size()) pts[i] = bary[i]; //} */ callid++; if (KNN != NULL) { FOR(i, _K_ * kn->allocated_points) { KNN->push_back(knn[i]); } } kn_free(&kn); free(nvpts); free(knn); show_status_stats(stat); }
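// Usage sketch for the GPUBuffer<T> helper defined above (illustrative only:
// the function name gpu_buffer_usage_sketch, the buffer size and the
// commented-out kernel launch are assumptions, not code from this project).
// The wrapper sizes the device allocation from the std::vector, mirrors it with
// cpu2gpu()/gpu2cpu(), and releases the device memory in its destructor.
static void gpu_buffer_usage_sketch() {
    std::vector<float> bary(4 * 1000, 0.f);   // host-side storage to mirror on the GPU
    GPUBuffer<float> bary_w(bary);            // cudaMalloc + host->device copy
    // some_kernel<<<grid, block>>>(bary_w.gpu_data, ...);  // kernels work on gpu_data
    bary_w.gpu2cpu();                         // device->host copy back into 'bary'
}                                             // ~GPUBuffer frees the device buffer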
dc04a36e0631bcddab9960ada8787500eb0a8a9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff, 3); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { 
caffe_copy(count, top_diff, bottom_diff, 3);
        } else {
          caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
        }
        break;
      case EltwiseParameter_EltwiseOp_MAX:
        mask = max_idx_.gpu_data();
        // NOLINT_NEXT_LINE(whitespace/operators)
        hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            count, top_diff, i, mask, bottom_diff);
        break;
      default:
        LOG(FATAL) << "Unknown elementwise operation.";
      }
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);

}  // namespace caffe
dc04a36e0631bcddab9960ada8787500eb0a8a9f.cu
#include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff, 3); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff, 3); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask 
= max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
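// Scalar reference for the MAX path above (an illustrative sketch on plain
// arrays, not part of the caffe API; the function name and parameters are
// assumptions): the forward pass records which bottom blob won the
// element-wise max in `mask`, and the backward pass routes the top gradient
// only to that winner, mirroring what MaxForward/MaxBackward do per element.
static void eltwise_max_reference(const float* a, const float* b, int n,
                                  float* top, int* mask,
                                  const float* top_diff,
                                  float* a_diff, float* b_diff) {
  for (int i = 0; i < n; ++i) {
    mask[i] = (a[i] > b[i]) ? 0 : 1;                  // index of the winning input
    top[i] = (mask[i] == 0) ? a[i] : b[i];            // forward: element-wise max
    a_diff[i] = (mask[i] == 0) ? top_diff[i] : 0.f;   // backward: gradient to the winner
    b_diff[i] = (mask[i] == 1) ? top_diff[i] : 0.f;
  }
}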
8a175f0e8eb363b5e04b7adf6bfffdcd28f1f3e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include <math.h> #define RAND_RANGE(N) ((double)rand()/((double)RAND_MAX + 1)*(N)) //data generator void dataGenerator(int* data, int count, int first, int step) { assert(data != NULL); for(int i = 0; i < count; ++i) data[i] = first + i * step; srand(time(NULL)); for(int i = count-1; i>0; i--) //knuth shuffle { int j = RAND_RANGE(i); int k_tmp = data[i]; data[i] = data[j]; data[j] = k_tmp; } } //non random data generator void nrdataGenerator(int* data, int count, int first, int step) { assert(data != NULL); for(int i = 0; i < count; ++i) data[i] = first + i * step; //srand(time(NULL)); for(int i = count-1; i>0; i--) //knuth shuffle { int j = RAND_RANGE(i); int k_tmp = data[i]; data[i] = data[j]; data[j] = k_tmp; } } //sequential data generator void sDataGenerator(int* data, int count){ assert(data !=NULL); int j = 0; for(int i = count-1; i > 0; --i){ data[j] = i; j++; } } /* This function embeds PTX code of CUDA to extract bit field from x. "start" is the starting bit position relative to the LSB. "nbits" is the bit field length. It returns the extracted bit field as an unsigned integer. */ __device__ uint bfe(uint x, uint start, uint nbits) { uint bits; asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(x), "r"(start), "r"(nbits)); return bits; } //Feel free to change the names of the kernels or define more kernels below if necessary //define the histogram kernel here __global__ void histogram(int* d_data, int* d_histogram, int tagLength, int size, int num_buckets) { extern __shared__ int s_histogram[]; for(int i = threadIdx.x; i < num_buckets; i += blockDim.x){ //initialize array to 0 in block sized chunks s_histogram[i] = 0; } __syncthreads(); int T = blockIdx.x * blockDim.x + threadIdx.x; for(int i = T; i < size; i += blockDim.x * gridDim.x){ //grid stride int h = bfe(d_data[i], 0, tagLength); atomicAdd(&(s_histogram[h]), 1); } __syncthreads(); //reduce shared output into global output for(int i = threadIdx.x; i < num_buckets; i += blockDim.x){ //output to global memory in block sized chunks atomicAdd(&(d_histogram[i]), s_histogram[i]); } } //define the prefix scan kernel here //implement it yourself or borrow the code from CUDA samples __global__ void prefixscan(int *d_input, int *d_output, int n) { extern __shared__ int shmem[]; int T = threadIdx.x; int offset = 1; //there are n/2 threads so each thread must load 2 data points shmem[2*T] = d_input[2*T]; // load even indices into shared memory shmem[2*T+1] = d_input[2*T+1]; //load odd indices for (int d = n>>1; d > 0; d >>= 1) //upsweep, compute partial sums { __syncthreads(); if (T < d) { int ai = offset*(2*T+1)-1; int bi = offset*(2*T+2)-1; shmem[bi] += shmem[ai]; } offset *= 2; } if (T == 0) { shmem[n - 1] = 0; } //last element to 0 for (int d = 1; d < n; d *= 2) //downsweep, use partial sums to complete the psum { offset >>= 1; __syncthreads(); if (T < d){ int ai = offset*(2*T+1)-1; int bi = offset*(2*T+2)-1; int temp = shmem[ai]; shmem[ai] = shmem[bi]; shmem[bi] += temp; } } __syncthreads(); d_output[2*T] = shmem[2*T]; //write to global memory in even odd pairs like above d_output[2*T+1] = shmem[2*T+1]; } //define the reorder kernel here __global__ void Reorder(int* d_data, int* d_output, int* sum, int tagLength, int size) { int T = blockIdx.x * blockDim.x + threadIdx.x; if(T < size){ int h = bfe(d_data[T], 0, tagLength); //extract bits from input data int offset = atomicAdd(&(sum[h]), 1); //calculate 
offset d_output[offset] = d_data[T]; //use the offset to place input data into correct partition } } bool isPowerOfTwo(unsigned long x) { return (x != 0) && ((x & (x - 1)) == 0); } void outputHistogram(int* histogram, int buckets){ printf("Histogram:"); for(int i = 0; i < buckets && i < 10; i++){ printf("\n%02d: ", i); printf("%15lld ", histogram[i]); } } void output_result(int* histogram, int* psum, int num_buckets){ int i; long long total_cnt = 0; printf("Partition number: Offset and number of keys per partition:"); for(i=0; i< num_buckets; i++) { if(i%5 == 0) /* we print 5 buckets in a row */ printf("\n%02d: ", i); printf("%8d: %7lld ", psum[i],histogram[i]); total_cnt += histogram[i]; /* we also want to make sure the total distance count is correct */ if(i == num_buckets - 1) printf("\n T:%lld \n", total_cnt); else printf("| "); } } int main(int argc, char const *argv[]) { int rSize = atoi(argv[1]); //number of elements in input array int numP = atoi(argv[2]); //number of partitions that input will be sorted into assert(numP <= rSize && isPowerOfTwo(numP)); //number of partitions must be less than or equal to the input array size and power of 2 int tag = int(log2(float(numP))); //define number of bits in a tag //errors for incorrect inputs if(argc > 3){ printf("Too many command line arguments, ending program\n"); return 0; } else if(argc < 3){ printf("Too few command line arguments, ending program\n"); return 0; } if(rSize <= 0 || numP > 1024 || numP <=0){ //input size must be >= 0 and max # partitions is 1024 printf("Invalid command line arguments, ending program\n"); return 0; } //Create all needed arrays for all 3 kernels and allocate memory int* r_d; //input array for device hipMalloc((void**)&r_d, sizeof(int)*rSize); //size of number of inputs int* r_h; hipHostMalloc((void**)&r_h, sizeof(int)*rSize); //use pinned memory in host so it copies to GPU faster dataGenerator(r_h, rSize, 0, 1); //randomly generate input data int* d_histogram; //histogram for algorithm 1 hipMalloc((void**)&d_histogram, sizeof(int)*numP); //size of number of partitions int* h_histogram; hipHostMalloc((void**)&h_histogram, sizeof(int)*numP); //a bucket for each partition memset(h_histogram, 0, sizeof(int)*numP); //initialize host histogram to zero int* d_psum; //array to hold the prefix sum of algorithm 2 hipMalloc((void**)&d_psum, sizeof(int)*numP); //a bucket for each partition int* h_psum; hipHostMalloc((void**)&h_psum, sizeof(int)*numP); int* d_output; //output array for final sorted result hipMalloc((void**)&d_output, sizeof(int)*rSize); int* h_output; hipHostMalloc((void**)&h_output, sizeof(int)*rSize); //copy host data to device memory hipMemcpy(r_d, r_h, sizeof(int)*rSize, hipMemcpyHostToDevice); hipMemcpy(d_histogram, h_histogram, sizeof(int)*numP, hipMemcpyHostToDevice); printf("The number of elements in the input array is: %d\n",rSize); printf("The number of partitions is: %d\n",numP); printf("The number of bits in a tag is: %d\n\n",tag); printf("The contents of the input array are: \n"); for(int i = 0; i < rSize && i < 100; i++){ printf("%d\n",r_h[i]); } //define block and grid size for algorithm 1 and 3. 
2 only runs with 1 block int num_threads = 1024; //number of threads in a block int num_blocks = (rSize + num_threads - 1)/num_threads; //start counting time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //launch kernel 1 - create histogram hipLaunchKernelGGL(( histogram), dim3(1024), dim3(256), numP*sizeof(int), 0, r_d, d_histogram, tag, rSize, numP); //copy data from gpu to host hipMemcpy(h_histogram, d_histogram, sizeof(int)*numP, hipMemcpyDeviceToHost); //launch kernel 2 - exclusive prefix sum of histogram hipLaunchKernelGGL(( prefixscan), dim3(1), dim3(numP/2), numP*sizeof(int), 0, d_histogram, d_psum, numP); //copy data from gpu to host hipMemcpy(h_psum, d_psum, sizeof(int)*numP, hipMemcpyDeviceToHost); /*//print psum printf("First 100 of exclusive prefix: \n"); for(int i = 0; i < numP && i < 50; i++){ printf("%d\n",h_psum[i]); } printf("\n");*/ //launch kernel 3 - reorder input array hipLaunchKernelGGL(( Reorder), dim3(num_blocks), dim3(num_threads), 0, 0, r_d, d_output, d_psum, tag, rSize); //copy final result from gpu to host hipMemcpy(h_output, d_output, sizeof(int)*rSize, hipMemcpyDeviceToHost); //stop counting time hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); //print sorted result printf("First 50 of the sorted output: \n"); for(int i = 0; i < rSize && i < 50; i++){ printf("%d\n",h_output[i]); } //print formatted output output_result(h_histogram, h_psum, numP); //report running time printf("******** Total Running Time of All Kernels = %0.5f ms *******\n", elapsedTime); hipEventDestroy(start); hipEventDestroy(stop); hipHostFree(r_h); hipHostFree(h_histogram); return 0; }
8a175f0e8eb363b5e04b7adf6bfffdcd28f1f3e5.cu
#include <assert.h> #include <stdio.h> #include <math.h> #define RAND_RANGE(N) ((double)rand()/((double)RAND_MAX + 1)*(N)) //data generator void dataGenerator(int* data, int count, int first, int step) { assert(data != NULL); for(int i = 0; i < count; ++i) data[i] = first + i * step; srand(time(NULL)); for(int i = count-1; i>0; i--) //knuth shuffle { int j = RAND_RANGE(i); int k_tmp = data[i]; data[i] = data[j]; data[j] = k_tmp; } } //non random data generator void nrdataGenerator(int* data, int count, int first, int step) { assert(data != NULL); for(int i = 0; i < count; ++i) data[i] = first + i * step; //srand(time(NULL)); for(int i = count-1; i>0; i--) //knuth shuffle { int j = RAND_RANGE(i); int k_tmp = data[i]; data[i] = data[j]; data[j] = k_tmp; } } //sequential data generator void sDataGenerator(int* data, int count){ assert(data !=NULL); int j = 0; for(int i = count-1; i > 0; --i){ data[j] = i; j++; } } /* This function embeds PTX code of CUDA to extract bit field from x. "start" is the starting bit position relative to the LSB. "nbits" is the bit field length. It returns the extracted bit field as an unsigned integer. */ __device__ uint bfe(uint x, uint start, uint nbits) { uint bits; asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(x), "r"(start), "r"(nbits)); return bits; } //Feel free to change the names of the kernels or define more kernels below if necessary //define the histogram kernel here __global__ void histogram(int* d_data, int* d_histogram, int tagLength, int size, int num_buckets) { extern __shared__ int s_histogram[]; for(int i = threadIdx.x; i < num_buckets; i += blockDim.x){ //initialize array to 0 in block sized chunks s_histogram[i] = 0; } __syncthreads(); int T = blockIdx.x * blockDim.x + threadIdx.x; for(int i = T; i < size; i += blockDim.x * gridDim.x){ //grid stride int h = bfe(d_data[i], 0, tagLength); atomicAdd(&(s_histogram[h]), 1); } __syncthreads(); //reduce shared output into global output for(int i = threadIdx.x; i < num_buckets; i += blockDim.x){ //output to global memory in block sized chunks atomicAdd(&(d_histogram[i]), s_histogram[i]); } } //define the prefix scan kernel here //implement it yourself or borrow the code from CUDA samples __global__ void prefixscan(int *d_input, int *d_output, int n) { extern __shared__ int shmem[]; int T = threadIdx.x; int offset = 1; //there are n/2 threads so each thread must load 2 data points shmem[2*T] = d_input[2*T]; // load even indices into shared memory shmem[2*T+1] = d_input[2*T+1]; //load odd indices for (int d = n>>1; d > 0; d >>= 1) //upsweep, compute partial sums { __syncthreads(); if (T < d) { int ai = offset*(2*T+1)-1; int bi = offset*(2*T+2)-1; shmem[bi] += shmem[ai]; } offset *= 2; } if (T == 0) { shmem[n - 1] = 0; } //last element to 0 for (int d = 1; d < n; d *= 2) //downsweep, use partial sums to complete the psum { offset >>= 1; __syncthreads(); if (T < d){ int ai = offset*(2*T+1)-1; int bi = offset*(2*T+2)-1; int temp = shmem[ai]; shmem[ai] = shmem[bi]; shmem[bi] += temp; } } __syncthreads(); d_output[2*T] = shmem[2*T]; //write to global memory in even odd pairs like above d_output[2*T+1] = shmem[2*T+1]; } //define the reorder kernel here __global__ void Reorder(int* d_data, int* d_output, int* sum, int tagLength, int size) { int T = blockIdx.x * blockDim.x + threadIdx.x; if(T < size){ int h = bfe(d_data[T], 0, tagLength); //extract bits from input data int offset = atomicAdd(&(sum[h]), 1); //calculate offset d_output[offset] = d_data[T]; //use the offset to place input data into correct 
partition } } bool isPowerOfTwo(unsigned long x) { return (x != 0) && ((x & (x - 1)) == 0); } void outputHistogram(int* histogram, int buckets){ printf("Histogram:"); for(int i = 0; i < buckets && i < 10; i++){ printf("\n%02d: ", i); printf("%15lld ", histogram[i]); } } void output_result(int* histogram, int* psum, int num_buckets){ int i; long long total_cnt = 0; printf("Partition number: Offset and number of keys per partition:"); for(i=0; i< num_buckets; i++) { if(i%5 == 0) /* we print 5 buckets in a row */ printf("\n%02d: ", i); printf("%8d: %7lld ", psum[i],histogram[i]); total_cnt += histogram[i]; /* we also want to make sure the total distance count is correct */ if(i == num_buckets - 1) printf("\n T:%lld \n", total_cnt); else printf("| "); } } int main(int argc, char const *argv[]) { int rSize = atoi(argv[1]); //number of elements in input array int numP = atoi(argv[2]); //number of partitions that input will be sorted into assert(numP <= rSize && isPowerOfTwo(numP)); //number of partitions must be less than or equal to the input array size and power of 2 int tag = int(log2(float(numP))); //define number of bits in a tag //errors for incorrect inputs if(argc > 3){ printf("Too many command line arguments, ending program\n"); return 0; } else if(argc < 3){ printf("Too few command line arguments, ending program\n"); return 0; } if(rSize <= 0 || numP > 1024 || numP <=0){ //input size must be >= 0 and max # partitions is 1024 printf("Invalid command line arguments, ending program\n"); return 0; } //Create all needed arrays for all 3 kernels and allocate memory int* r_d; //input array for device cudaMalloc((void**)&r_d, sizeof(int)*rSize); //size of number of inputs int* r_h; cudaMallocHost((void**)&r_h, sizeof(int)*rSize); //use pinned memory in host so it copies to GPU faster dataGenerator(r_h, rSize, 0, 1); //randomly generate input data int* d_histogram; //histogram for algorithm 1 cudaMalloc((void**)&d_histogram, sizeof(int)*numP); //size of number of partitions int* h_histogram; cudaMallocHost((void**)&h_histogram, sizeof(int)*numP); //a bucket for each partition memset(h_histogram, 0, sizeof(int)*numP); //initialize host histogram to zero int* d_psum; //array to hold the prefix sum of algorithm 2 cudaMalloc((void**)&d_psum, sizeof(int)*numP); //a bucket for each partition int* h_psum; cudaMallocHost((void**)&h_psum, sizeof(int)*numP); int* d_output; //output array for final sorted result cudaMalloc((void**)&d_output, sizeof(int)*rSize); int* h_output; cudaMallocHost((void**)&h_output, sizeof(int)*rSize); //copy host data to device memory cudaMemcpy(r_d, r_h, sizeof(int)*rSize, cudaMemcpyHostToDevice); cudaMemcpy(d_histogram, h_histogram, sizeof(int)*numP, cudaMemcpyHostToDevice); printf("The number of elements in the input array is: %d\n",rSize); printf("The number of partitions is: %d\n",numP); printf("The number of bits in a tag is: %d\n\n",tag); printf("The contents of the input array are: \n"); for(int i = 0; i < rSize && i < 100; i++){ printf("%d\n",r_h[i]); } //define block and grid size for algorithm 1 and 3. 
2 only runs with 1 block int num_threads = 1024; //number of threads in a block int num_blocks = (rSize + num_threads - 1)/num_threads; //start counting time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //launch kernel 1 - create histogram histogram<<<1024, 256, numP*sizeof(int)>>>(r_d, d_histogram, tag, rSize, numP); //copy data from gpu to host cudaMemcpy(h_histogram, d_histogram, sizeof(int)*numP, cudaMemcpyDeviceToHost); //launch kernel 2 - exclusive prefix sum of histogram prefixscan<<<1, numP/2, numP*sizeof(int)>>>(d_histogram, d_psum, numP); //copy data from gpu to host cudaMemcpy(h_psum, d_psum, sizeof(int)*numP, cudaMemcpyDeviceToHost); /*//print psum printf("First 100 of exclusive prefix: \n"); for(int i = 0; i < numP && i < 50; i++){ printf("%d\n",h_psum[i]); } printf("\n");*/ //launch kernel 3 - reorder input array Reorder<<<num_blocks, num_threads>>>(r_d, d_output, d_psum, tag, rSize); //copy final result from gpu to host cudaMemcpy(h_output, d_output, sizeof(int)*rSize, cudaMemcpyDeviceToHost); //stop counting time cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); //print sorted result printf("First 50 of the sorted output: \n"); for(int i = 0; i < rSize && i < 50; i++){ printf("%d\n",h_output[i]); } //print formatted output output_result(h_histogram, h_psum, numP); //report running time printf("******** Total Running Time of All Kernels = %0.5f ms *******\n", elapsedTime); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFreeHost(r_h); cudaFreeHost(h_histogram); return 0; }
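// Host-side equivalent of the bfe() PTX helper used by the kernels above
// (illustrative sketch; it only covers the low-bits case actually used here,
// i.e. start == 0 and 0 < nbits < 32, and the name bfe_host is an assumption,
// not part of the original program):
static unsigned int bfe_host(unsigned int x, unsigned int nbits) {
    return x & ((1u << nbits) - 1u);   // keep the low 'nbits' bits -> partition id
}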
90dadcb50ba77697b655923d6e155164e163dd52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Transpose01.h" #include "Global.h" namespace cytonLib { __global__ void transpose01_forward_kernel(Precision* x, Precision* y, int dim2, int dim1, int dim0) { int i0=blockDim.x*blockIdx.x+threadIdx.x; if(i0<dim0) { int i2=blockIdx.z; int i1=blockIdx.y; y[i1*dim2*dim0+i2*dim0+i0]=x[i2*dim1*dim0+i1*dim0+i0]; } } __global__ void transpose01_backward_kernel(Precision* x, Precision* y, int dim2, int dim1, int dim0, bool add) { int i0=blockDim.x*blockIdx.x+threadIdx.x; if(i0<dim0) { int i2=blockIdx.z; int i1=blockIdx.y; Precision* tx=x+i2*dim1*dim0+i1*dim0+i0; Precision* ty=y+i1*dim2*dim0+i2*dim0+i0; if(add) { *tx+=*ty; } else { *tx=*ty; } } } Variable* Transpose01::init(string tag_, Variable* x_) { tag=tag_; x=x_; y.resize(x->c, x->n, x->h, x->w); y.enlarge=false; return &y; } void Transpose01::forward() { y.resize(x->c, x->n, x->h, x->w); int dim2=x->n; int dim1=x->c; int dim0=x->h*x->w; dim3 grid(ceil(dim0, blockSize),dim1, dim2); hipLaunchKernelGGL(( transpose01_forward_kernel), dim3(grid), dim3(blockSize), 0, 0, x->data, y.data, dim2, dim1, dim0); } void Transpose01::backward() { int dim2=x->n; int dim1=x->c; int dim0=x->h*x->w; dim3 grid(ceil(dim0, blockSize),dim1, dim2); hipLaunchKernelGGL(( transpose01_backward_kernel), dim3(grid), dim3(blockSize), 0, 0, x->grad.data, y.grad.data, dim2, dim1, dim0, addGrad); } } /* namespace cytonLib */
90dadcb50ba77697b655923d6e155164e163dd52.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Transpose01.h" #include "Global.h" namespace cytonLib { __global__ void transpose01_forward_kernel(Precision* x, Precision* y, int dim2, int dim1, int dim0) { int i0=blockDim.x*blockIdx.x+threadIdx.x; if(i0<dim0) { int i2=blockIdx.z; int i1=blockIdx.y; y[i1*dim2*dim0+i2*dim0+i0]=x[i2*dim1*dim0+i1*dim0+i0]; } } __global__ void transpose01_backward_kernel(Precision* x, Precision* y, int dim2, int dim1, int dim0, bool add) { int i0=blockDim.x*blockIdx.x+threadIdx.x; if(i0<dim0) { int i2=blockIdx.z; int i1=blockIdx.y; Precision* tx=x+i2*dim1*dim0+i1*dim0+i0; Precision* ty=y+i1*dim2*dim0+i2*dim0+i0; if(add) { *tx+=*ty; } else { *tx=*ty; } } } Variable* Transpose01::init(string tag_, Variable* x_) { tag=tag_; x=x_; y.resize(x->c, x->n, x->h, x->w); y.enlarge=false; return &y; } void Transpose01::forward() { y.resize(x->c, x->n, x->h, x->w); int dim2=x->n; int dim1=x->c; int dim0=x->h*x->w; dim3 grid(ceil(dim0, blockSize),dim1, dim2); transpose01_forward_kernel<<<grid, blockSize>>>(x->data, y.data, dim2, dim1, dim0); } void Transpose01::backward() { int dim2=x->n; int dim1=x->c; int dim0=x->h*x->w; dim3 grid(ceil(dim0, blockSize),dim1, dim2); transpose01_backward_kernel<<<grid, blockSize>>>(x->grad.data, y.grad.data, dim2, dim1, dim0, addGrad); } } /* namespace cytonLib */
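// Host reference for the axis swap done by transpose01_forward_kernel above
// (illustrative sketch; it uses float in place of the library's Precision type
// and a standalone function name, both assumptions). With row-major
// (dim2, dim1, dim0) storage the kernel computes y[i1][i2][i0] = x[i2][i1][i0]:
static void transpose01_host_reference(const float* x, float* y,
                                       int dim2, int dim1, int dim0) {
  for (int i2 = 0; i2 < dim2; ++i2)
    for (int i1 = 0; i1 < dim1; ++i1)
      for (int i0 = 0; i0 < dim0; ++i0)
        y[i1 * dim2 * dim0 + i2 * dim0 + i0] = x[i2 * dim1 * dim0 + i1 * dim0 + i0];
}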
cc30631175c13a270a0c591d67cabb8c495b1f11.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>

//Size of the (square) matrices
#define N 1024

//Kernel
__global__ void mul(int * A, int * B, int * C){
    int i = blockIdx.x;
    int j = threadIdx.x;

    //TODO -> Compute element C(i,j)
}

int main(){
    struct timeval t1, t2;
    int *hA, *hB, *hC, *hC2; //Host Matrix
    int *dA, *dB, *dC;  //Device Matrix

    //Host memory allocation
    hA = (int*)malloc(N*N*sizeof(int));
    hB = (int*)malloc(N*N*sizeof(int));
    hC = (int*)malloc(N*N*sizeof(int));
    hC2 = (int*)malloc(N*N*sizeof(int));

    //Matrix initialization
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        for (int j = 0; j < N; j++){
            hA[i*N+j] = rand();
            hB[i*N+j] = rand();
        }
    }

    //GPU memory allocation
    //TODO -> Allocate memory (dA, dB, dC)

    //Copy Host -> GPU
    //TODO -> Copy hA to dA and hB to dB

    gettimeofday(&t1, 0);

    //Kernel execution
    hipLaunchKernelGGL(( mul), dim3(N), dim3(N), 0, 0, dA, dB, dC);
    hipDeviceSynchronize();

    gettimeofday(&t2, 0);

    //Copy Device -> Host
    //TODO -> Copy dC to hC

    //Multiplication on the Host
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            hC2[i*N + j] = 0;
            for(int k = 0; k < N; k++){
                hC2[i*N + j] += hA[i*N + k] * hB[k*N + j];
            }
        }
    }

    //Error check
    bool error = false;
    for(int i = 0; i < N*N; i++){
        if(hC[i] != hC2[i]){
            error = true;
            break;
        }
    }
    if(error)
        printf("Matrix multiplication failed.\n");
    else
        printf("Matrix multiplication is correct.\n");

    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Time: %f ms\n", time);

    //Free memory
    free(hA);
    free(hB);
    free(hC);
    //TODO -> Free dA, dB and dC
}
cc30631175c13a270a0c591d67cabb8c495b1f11.cu
#include <cuda_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> //Tamaño de matrices (cuadradas) #define N 1024 //Kernel __global__ void mul(int * A, int * B, int * C){ int i = blockIdx.x; int j = threadIdx.x; //TODO -> Calcular elemento C(i,j) } int main(){ struct timeval t1, t2; int *hA, *hB, *hC, *hC2; //Host Matrix int *dA, *dB, *dC; //Device Matrix //Reserva de memoria Host hA = (int*)malloc(N*N*sizeof(int)); hB = (int*)malloc(N*N*sizeof(int)); hC = (int*)malloc(N*N*sizeof(int)); hC2 = (int*)malloc(N*N*sizeof(int)); //Inicialización de matrices srand(time(NULL)); for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ hA[i*N+j] = rand(); hB[i*N+j] = rand(); } } //Reserva de memoria GPU //TODO -> Reservar Memoria (dA, dB, dC) //Copia Host -> GPU //TODO -> Copiar hA a dA y hB a dB gettimeofday(&t1, 0); //Ejecución Kernel mul<<<N, N>>>(dA, dB, dC); cudaDeviceSynchronize(); gettimeofday(&t2, 0); //Copia Device -> Host //TODO -> Copiar dC a hC //Multiplicación en Host for(int i = 0; i < N; i++){ for(int j = 0; j < N; j++){ hC2[i*N + j] = 0; for(int k = 0; k < N; k++){ hC2[i*N + j] += hA[i*N + k] * hB[k*N + j]; } } } //Comprobación de errores bool error = false; for(int i = 0; i < N*N; i++){ if(hC[i] != hC2[i]){ error = true; break; } } if(error) printf("La multiplicación de matrices ha fallado.\n"); else printf("Multiplicación de matrices correcta.\n"); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Tiempo: %f ms\n", time); //Liberar memoria free(hA); free(hB); free(hC); //TODO -> Liberar dA, dB y dC }
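The exercise above leaves the kernel body and the device-side memory management as TODO markers. Below is a minimal, self-contained sketch (not the course's reference solution) of how those gaps are commonly filled in: the skeleton's mul<<<N, N>>> launch is kept, so blockIdx.x selects the row and threadIdx.x the column, and each thread accumulates one dot product. The cudaMalloc/cudaMemcpy/cudaFree calls are standard CUDA runtime API; the only deliberate deviation from the skeleton is seeding the matrices with rand() % 100 so the integer accumulation stays well inside int range.

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define N 1024

// Each thread computes one element C(i,j): the dot product of row i of A and column j of B.
__global__ void mul(int *A, int *B, int *C) {
    int i = blockIdx.x;
    int j = threadIdx.x;
    int acc = 0;
    for (int k = 0; k < N; k++) {
        acc += A[i * N + k] * B[k * N + j];
    }
    C[i * N + j] = acc;
}

int main() {
    size_t bytes = (size_t)N * N * sizeof(int);
    int *hA = (int *)malloc(bytes);
    int *hB = (int *)malloc(bytes);
    int *hC = (int *)malloc(bytes);
    srand(0);
    for (int k = 0; k < N * N; k++) { hA[k] = rand() % 100; hB[k] = rand() % 100; }

    // TODO "Reservar Memoria (dA, dB, dC)"
    int *dA, *dB, *dC;
    cudaMalloc((void **)&dA, bytes);
    cudaMalloc((void **)&dB, bytes);
    cudaMalloc((void **)&dC, bytes);

    // TODO "Copiar hA a dA y hB a dB"
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);

    mul<<<N, N>>>(dA, dB, dC);   // one block per row, one thread per column
    cudaDeviceSynchronize();

    // TODO "Copiar dC a hC"
    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
    printf("C[0][0] = %d\n", hC[0]);

    // TODO "Liberar dA, dB y dC"
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    free(hA); free(hB); free(hC);
    return 0;
}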
94ca0d15df0ccc01b927cb88f311f3ecde306789.hip
// !!! This is a file automatically generated by hipify!!! //#include <helper_cuda.h> #include <helper_timer.h> #include <mpfr.h> #include <qd/dd_real.h> #include "../../gpuprec/gqd/gqd.cu" using namespace std; void qd2gqd(dd_real* dd_data, gdd_real* gdd_data, const unsigned int numElement) { for (unsigned int i = 0; i < numElement; i++) { gdd_data[i].x = dd_data[i].x[0]; gdd_data[i].y = dd_data[i].x[1]; } } void gqd2qd(gdd_real* gdd_data, dd_real* dd_data, const unsigned int numElement) { for (unsigned int i = 0; i < numElement; i++) { dd_data[i].x[0] = gdd_data[i].x; dd_data[i].x[1] = gdd_data[i].y; } } void qd2gqd2(dd_real dd_data[][5], gdd_real gdd_data[][5], int d1, int d2, int numElement) { for (unsigned int i = 0; i < d1; i++) { for (unsigned int j = 0; j < d2; j++) { gdd_data[i][j].x = dd_data[i][j].x[0]; gdd_data[i][j].y = dd_data[i][j].x[1]; } } } #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include <string.h> #include <math.h> #include <mpfr.h> #include <iostream> using namespace std; #ifdef RD_WG_SIZE_0_0 #define MAXBLOCKSIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define MAXBLOCKSIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define MAXBLOCKSIZE RD_WG_SIZE #else #define MAXBLOCKSIZE 512 #endif //2D defines. Go from specific to general #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_XY RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_XY RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_XY RD_WG_SIZE #else #define BLOCK_SIZE_XY 4 #endif FILE *fp; unsigned int totalKernelTime = 0; // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *trix,int size) { int i; int j; double lamda = - 0.01; double cof[2 * size - 1]; double coe_i = 0.0; for (i = 0; i < size; i++) { coe_i = 10 * exp(lamda * i); j = size - 1 + i; cof[j] = coe_i; j = size - 1 - i; cof[j] = coe_i; } for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { trix[i * size + j] = cof[size - 1 - i + j]; } } } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr,"Cuda error: %s: %s.\n",msg,(hipGetErrorString(err))); exit(1); } } /*------------------------------------------------------- ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value **------------------------------------------------------- */ __global__ void Fan1(float *c_m,float *c_a,int Size,int t) { if (threadIdx . x + blockIdx . x * blockDim . x >= (Size - 1 - t)) return ; c_m[Size * (blockDim . x * blockIdx . x + threadIdx . x + t + 1) + t] = c_a[Size * (blockDim . x * blockIdx . x + threadIdx . x + t + 1) + t] / c_a[Size * t + t]; } /*------------------------------------------------------- **------------------------------------------------------- */ __global__ void Fan2(float *c_m,float *c_a,float *c_b,int Size,int j1,int t) { if (threadIdx . x + blockIdx . x * blockDim . x >= (Size - 1 - t)) return ; if (threadIdx . y + blockIdx . y * blockDim . y >= (Size - t)) return ; int xidx = (blockIdx . x * blockDim . x + threadIdx . x); int yidx = (blockIdx . y * blockDim . y + threadIdx . 
y); c_a[Size * (xidx + 1 + t) + (yidx + t)] =( c_a[Size * (xidx + 1 + t) + (yidx + t)] - c_m[Size * (xidx + 1 + t) + t] * c_a[Size * t + (yidx + t)]); if (yidx == 0) { c_b[xidx + 1 + t] =( c_b[xidx + 1 + t] - c_m[Size * (xidx + 1 + t) + (yidx + t)] * c_b[t]); } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. **------------------------------------------------------ */ /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ int main(int argc,char *argv[]) { printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n",512,4,4); int verbose = 0; int i; int j; int t; char flag; if (argc < 2) { printf("Usage: gaussian -f filename / -s size [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("-f (filename) path of input file\n"); printf("-s (size) size of matrix. Create matrix and rhs in this program \n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. (optional)\n"); printf("The final line of the file is the pre-computed solution. (optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6\t-0.5\t0.7\t0.3\n"); printf("-0.3\t-0.9\t0.3\t0.7\n"); printf("-0.4\t-0.5\t-0.3\t-0.8\n"); printf("0.0\t-0.1\t0.2\t0.9\n"); printf("\n"); printf("-0.85\t-0.68\t0.24\t-0.53\n"); printf("\n"); printf("0.7\t0.0\t-0.4\t-0.5\n"); exit(0); } int Size; for (i = 1; i < argc; i++) { // flag if (argv[i][0] == '-') { flag = argv[i][1]; switch(flag){ // platform case 's': i++; Size = atoi(argv[i]); printf("Create matrix internally in parse, size = %d \n",Size); break; } } } float *ha = new float [Size * Size]; create_matrix(ha,Size); float *hb = new float [Size]; for (j = 0; j < Size; j++) hb[j] = 1.0; float *hm = new float [Size * Size]; float *finalVec = new float [Size]; //InitProblemOnce(filename); for (i = 0; i < Size * Size; i++) hm[i] = ((float )0.0); //begin timing struct timeval start_t; struct timeval end_t; struct timeval skt_t; struct timeval ske_t; struct timeval sht_t; struct timeval she_t; gettimeofday(&start_t,0L); float *cuda_m; float *cuda_a; float *cuda_b; // allocate memory on GPU hipMalloc((void **)(&cuda_m),(Size * Size) * sizeof(float )); hipMalloc((void **)(&cuda_a),(Size * Size) * sizeof(float )); hipMalloc((void **)(&cuda_b),Size * sizeof(float )); // copy memory to GPU hipMemcpy(cuda_m,hm,(Size * Size) * sizeof(float ),hipMemcpyHostToDevice); hipMemcpy(cuda_a,ha,(Size * Size) * sizeof(float ),hipMemcpyHostToDevice); hipMemcpy(cuda_b,hb,Size * sizeof(float ),hipMemcpyHostToDevice); int block_size; int grid_size; block_size = 512; grid_size = Size / block_size + ((!(Size % block_size)?0 : 1)); //printf("1d grid size: %d\n",grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d; int gridSize2d; blockSize2d = 4; gridSize2d = Size / blockSize2d + (!(((Size % blockSize2d)?0 : 1))); ::dim3 dimBlockXY(blockSize2d,blockSize2d); ::dim3 dimGridXY(gridSize2d,gridSize2d); ::dim3 dimBlock(block_size); ::dim3 dimGrid(grid_size); gettimeofday(&skt_t,0L); for (t = 0; t < Size - 1; t++) { 
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, cuda_m,cuda_a,Size,t); hipDeviceSynchronize(); hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, cuda_m,cuda_a,cuda_b,Size,Size - t,t); hipDeviceSynchronize(); checkCUDAError("Fan2"); } gettimeofday(&ske_t,0L); // copy memory back to CPU hipMemcpy(hm,cuda_m,(Size * Size) * sizeof(float ),hipMemcpyDeviceToHost); hipMemcpy(ha,cuda_a,(Size * Size) * sizeof(float ),hipMemcpyDeviceToHost); hipMemcpy(hb,cuda_b,Size * sizeof(float ),hipMemcpyDeviceToHost); //BackSub(); // create a new vector to hold the final answer // solve "bottom up" gettimeofday(&sht_t,0L); for (i = 0; i < Size; i++) { finalVec[Size - i - 1] = hb[Size - i - 1]; for (j = 0; j < i; j++) { finalVec[Size - i - 1] = finalVec[Size - i - 1] - ha[Size * (Size - i - 1) + (Size - j - 1)] * finalVec[Size - j - 1]; } finalVec[Size - i - 1] = finalVec[Size - i - 1] / ha[Size * (Size - i - 1) + (Size - i - 1)]; } gettimeofday(&she_t,0L); gettimeofday(&end_t,0L); mpf_t val_x, val_y, val_in, err; mpf_init2(val_x, 128); mpf_init2(val_y, 128); mpf_init2(val_in, 128); mpf_init2(err, 128); FILE* infile = fopen("fv_ref.txt", "r"); for(int i = 0; i < Size; i++) { gmp_fscanf(infile, "%Fe\n", val_in); mpf_set_d(val_x, finalVec[i]); mpf_sub(val_x, val_x, val_in); mpf_abs(val_y, val_x); mpf_div(val_x, val_y, val_in); if (i==0) mpf_set(err, val_x); else mpf_add(err, err, val_x); } mpf_div_ui(err, err, Size); fclose(infile); gmp_printf("error: %10.5Fe\n", err); ((std::cout<<"time: ") << ((end_t . tv_sec - start_t . tv_sec) + (end_t . tv_usec - start_t . tv_usec) * 1e-6)) << endl; ((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6 + (she_t . tv_sec - sht_t . tv_sec) + (she_t . tv_usec - sht_t . tv_usec) * 1e-6)) << endl; //if (verbose) { // printf("The final solution is: \n"); // PrintAry(finalVec,Size); //} hipFree(cuda_m); hipFree(cuda_a); hipFree(cuda_b); free(hm); free(ha); free(hb); }
94ca0d15df0ccc01b927cb88f311f3ecde306789.cu
//#include <helper_cuda.h> #include <helper_timer.h> #include <mpfr.h> #include <qd/dd_real.h> #include "../../gpuprec/gqd/gqd.cu" using namespace std; void qd2gqd(dd_real* dd_data, gdd_real* gdd_data, const unsigned int numElement) { for (unsigned int i = 0; i < numElement; i++) { gdd_data[i].x = dd_data[i].x[0]; gdd_data[i].y = dd_data[i].x[1]; } } void gqd2qd(gdd_real* gdd_data, dd_real* dd_data, const unsigned int numElement) { for (unsigned int i = 0; i < numElement; i++) { dd_data[i].x[0] = gdd_data[i].x; dd_data[i].x[1] = gdd_data[i].y; } } void qd2gqd2(dd_real dd_data[][5], gdd_real gdd_data[][5], int d1, int d2, int numElement) { for (unsigned int i = 0; i < d1; i++) { for (unsigned int j = 0; j < d2; j++) { gdd_data[i][j].x = dd_data[i][j].x[0]; gdd_data[i][j].y = dd_data[i][j].x[1]; } } } #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "cuda.h" #include <cuda_runtime.h> #include <string.h> #include <math.h> #include <mpfr.h> #include <iostream> using namespace std; #ifdef RD_WG_SIZE_0_0 #define MAXBLOCKSIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define MAXBLOCKSIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define MAXBLOCKSIZE RD_WG_SIZE #else #define MAXBLOCKSIZE 512 #endif //2D defines. Go from specific to general #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_XY RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_XY RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_XY RD_WG_SIZE #else #define BLOCK_SIZE_XY 4 #endif FILE *fp; unsigned int totalKernelTime = 0; // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *trix,int size) { int i; int j; double lamda = - 0.01; double cof[2 * size - 1]; double coe_i = 0.0; for (i = 0; i < size; i++) { coe_i = 10 * exp(lamda * i); j = size - 1 + i; cof[j] = coe_i; j = size - 1 - i; cof[j] = coe_i; } for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { trix[i * size + j] = cof[size - 1 - i + j]; } } } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr,"Cuda error: %s: %s.\n",msg,(cudaGetErrorString(err))); exit(1); } } /*------------------------------------------------------- ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value **------------------------------------------------------- */ __global__ void Fan1(float *c_m,float *c_a,int Size,int t) { if (threadIdx . x + blockIdx . x * blockDim . x >= (Size - 1 - t)) return ; c_m[Size * (blockDim . x * blockIdx . x + threadIdx . x + t + 1) + t] = c_a[Size * (blockDim . x * blockIdx . x + threadIdx . x + t + 1) + t] / c_a[Size * t + t]; } /*------------------------------------------------------- **------------------------------------------------------- */ __global__ void Fan2(float *c_m,float *c_a,float *c_b,int Size,int j1,int t) { if (threadIdx . x + blockIdx . x * blockDim . x >= (Size - 1 - t)) return ; if (threadIdx . y + blockIdx . y * blockDim . y >= (Size - t)) return ; int xidx = (blockIdx . x * blockDim . x + threadIdx . x); int yidx = (blockIdx . y * blockDim . y + threadIdx . 
y); c_a[Size * (xidx + 1 + t) + (yidx + t)] =( c_a[Size * (xidx + 1 + t) + (yidx + t)] - c_m[Size * (xidx + 1 + t) + t] * c_a[Size * t + (yidx + t)]); if (yidx == 0) { c_b[xidx + 1 + t] =( c_b[xidx + 1 + t] - c_m[Size * (xidx + 1 + t) + (yidx + t)] * c_b[t]); } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. **------------------------------------------------------ */ /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ int main(int argc,char *argv[]) { printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n",512,4,4); int verbose = 0; int i; int j; int t; char flag; if (argc < 2) { printf("Usage: gaussian -f filename / -s size [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("-f (filename) path of input file\n"); printf("-s (size) size of matrix. Create matrix and rhs in this program \n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. (optional)\n"); printf("The final line of the file is the pre-computed solution. (optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6\t-0.5\t0.7\t0.3\n"); printf("-0.3\t-0.9\t0.3\t0.7\n"); printf("-0.4\t-0.5\t-0.3\t-0.8\n"); printf("0.0\t-0.1\t0.2\t0.9\n"); printf("\n"); printf("-0.85\t-0.68\t0.24\t-0.53\n"); printf("\n"); printf("0.7\t0.0\t-0.4\t-0.5\n"); exit(0); } int Size; for (i = 1; i < argc; i++) { // flag if (argv[i][0] == '-') { flag = argv[i][1]; switch(flag){ // platform case 's': i++; Size = atoi(argv[i]); printf("Create matrix internally in parse, size = %d \n",Size); break; } } } float *ha = new float [Size * Size]; create_matrix(ha,Size); float *hb = new float [Size]; for (j = 0; j < Size; j++) hb[j] = 1.0; float *hm = new float [Size * Size]; float *finalVec = new float [Size]; //InitProblemOnce(filename); for (i = 0; i < Size * Size; i++) hm[i] = ((float )0.0); //begin timing struct timeval start_t; struct timeval end_t; struct timeval skt_t; struct timeval ske_t; struct timeval sht_t; struct timeval she_t; gettimeofday(&start_t,0L); float *cuda_m; float *cuda_a; float *cuda_b; // allocate memory on GPU cudaMalloc((void **)(&cuda_m),(Size * Size) * sizeof(float )); cudaMalloc((void **)(&cuda_a),(Size * Size) * sizeof(float )); cudaMalloc((void **)(&cuda_b),Size * sizeof(float )); // copy memory to GPU cudaMemcpy(cuda_m,hm,(Size * Size) * sizeof(float ),cudaMemcpyHostToDevice); cudaMemcpy(cuda_a,ha,(Size * Size) * sizeof(float ),cudaMemcpyHostToDevice); cudaMemcpy(cuda_b,hb,Size * sizeof(float ),cudaMemcpyHostToDevice); int block_size; int grid_size; block_size = 512; grid_size = Size / block_size + ((!(Size % block_size)?0 : 1)); //printf("1d grid size: %d\n",grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d; int gridSize2d; blockSize2d = 4; gridSize2d = Size / blockSize2d + (!(((Size % blockSize2d)?0 : 1))); ::dim3 dimBlockXY(blockSize2d,blockSize2d); ::dim3 dimGridXY(gridSize2d,gridSize2d); ::dim3 dimBlock(block_size); ::dim3 dimGrid(grid_size); gettimeofday(&skt_t,0L); for (t = 0; t < Size - 1; 
t++) { Fan1<<<dimGrid,dimBlock>>>(cuda_m,cuda_a,Size,t); cudaThreadSynchronize(); Fan2<<<dimGridXY,dimBlockXY>>>(cuda_m,cuda_a,cuda_b,Size,Size - t,t); cudaThreadSynchronize(); checkCUDAError("Fan2"); } gettimeofday(&ske_t,0L); // copy memory back to CPU cudaMemcpy(hm,cuda_m,(Size * Size) * sizeof(float ),cudaMemcpyDeviceToHost); cudaMemcpy(ha,cuda_a,(Size * Size) * sizeof(float ),cudaMemcpyDeviceToHost); cudaMemcpy(hb,cuda_b,Size * sizeof(float ),cudaMemcpyDeviceToHost); //BackSub(); // create a new vector to hold the final answer // solve "bottom up" gettimeofday(&sht_t,0L); for (i = 0; i < Size; i++) { finalVec[Size - i - 1] = hb[Size - i - 1]; for (j = 0; j < i; j++) { finalVec[Size - i - 1] = finalVec[Size - i - 1] - ha[Size * (Size - i - 1) + (Size - j - 1)] * finalVec[Size - j - 1]; } finalVec[Size - i - 1] = finalVec[Size - i - 1] / ha[Size * (Size - i - 1) + (Size - i - 1)]; } gettimeofday(&she_t,0L); gettimeofday(&end_t,0L); mpf_t val_x, val_y, val_in, err; mpf_init2(val_x, 128); mpf_init2(val_y, 128); mpf_init2(val_in, 128); mpf_init2(err, 128); FILE* infile = fopen("fv_ref.txt", "r"); for(int i = 0; i < Size; i++) { gmp_fscanf(infile, "%Fe\n", val_in); mpf_set_d(val_x, finalVec[i]); mpf_sub(val_x, val_x, val_in); mpf_abs(val_y, val_x); mpf_div(val_x, val_y, val_in); if (i==0) mpf_set(err, val_x); else mpf_add(err, err, val_x); } mpf_div_ui(err, err, Size); fclose(infile); gmp_printf("error: %10.5Fe\n", err); ((std::cout<<"time: ") << ((end_t . tv_sec - start_t . tv_sec) + (end_t . tv_usec - start_t . tv_usec) * 1e-6)) << endl; ((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6 + (she_t . tv_sec - sht_t . tv_sec) + (she_t . tv_usec - sht_t . tv_usec) * 1e-6)) << endl; //if (verbose) { // printf("The final solution is: \n"); // PrintAry(finalVec,Size); //} cudaFree(cuda_m); cudaFree(cuda_a); cudaFree(cuda_b); free(hm); free(ha); free(hb); }
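The pair above illustrates the main mechanical change hipify makes to this benchmark: the triple-chevron launches Fan1<<<dimGrid,dimBlock>>>(...) become hipLaunchKernelGGL((Fan1), dim3(dimGrid), dim3(dimBlock), 0, 0, ...), where the two extra zero arguments are the dynamic shared-memory size and the stream, and the long-deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(). The following is a minimal, self-contained CUDA sketch of the same pattern, with the hipified form shown only in a comment; the kernel and variable names here are illustrative and not taken from the benchmark.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 256;
    float *d = nullptr;
    cudaMalloc((void **)&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    dim3 dimBlock(128);
    dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);

    // CUDA form, as in the .cu entry above:
    scale<<<dimGrid, dimBlock>>>(d, 2.0f, n);
    cudaDeviceSynchronize();   // hipify maps cudaThreadSynchronize()/cudaDeviceSynchronize() to hipDeviceSynchronize()

    // Equivalent HIP form emitted by hipify (shared-memory bytes and stream are the two zeros):
    //   hipLaunchKernelGGL((scale), dim3(dimGrid), dim3(dimBlock), 0, 0, d, 2.0f, n);

    cudaFree(d);
    printf("ok\n");
    return 0;
}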
1b9b47811840151631400c61c4a1c61186d215e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #ifdef __NVCC__ #include <hipcub/hipcub.hpp> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h" #include "paddle/phi/kernels/funcs/math.h" namespace paddle { namespace operators { using LoDTensor = phi::DenseTensor; template <typename T, int BlockDim> using BlockReduce = hipcub::BlockReduce<T, BlockDim>; template <typename T, int BlockDim> using BlockReduceTempStorage = typename BlockReduce<T, BlockDim>::TempStorage; template <typename T, int BlockDim> __global__ void sequence_softmax_kernel(const T *in_data, const size_t *ref_lod, const size_t src_hight, T *out_data) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; __shared__ T shared_max_data; __shared__ T shared_sum_data; for (int i = blockIdx.x; i < src_hight; i += gridDim.x) { size_t start = ref_lod[i]; size_t span = ref_lod[i + 1] - start; // Find the max ele T max_ele = -FLT_MAX; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; max_ele = max_ele > ele ? max_ele : ele; } max_ele = BlockReduce<T, BlockDim>(temp_storage).Reduce(max_ele, hipcub::Max()); if (threadIdx.x == 0) { shared_max_data = max_ele; } __syncthreads(); // sum T sum_data = 0; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; sum_data += phi::funcs::real_exp(ele - shared_max_data); } sum_data = BlockReduce<T, BlockDim>(temp_storage).Reduce(sum_data, hipcub::Sum()); if (threadIdx.x == 0) { shared_sum_data = sum_data; } __syncthreads(); // get final resit for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; ele = phi::funcs::real_exp(ele - shared_max_data) / shared_sum_data; out_data[start + tid] = ele; } } } template <typename T, int BlockDim> __global__ void sequence_softmax_grad_kernel(const T *softmax_grad_data, const T *softmax_data, const size_t *ref_lod, const size_t src_hight, T *dx_data) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; __shared__ T shared_data; for (int i = blockIdx.x; i < src_hight; i += gridDim.x) { size_t start = ref_lod[i]; size_t span = ref_lod[i + 1] - start; T result = 0; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { size_t idx = start + tid; T s_g_d = softmax_grad_data[idx]; T s_d = softmax_data[idx]; result += s_g_d * s_d; } result = BlockReduce<T, BlockDim>(temp_storage).Reduce(result, hipcub::Sum()); if (threadIdx.x == 0) { shared_data = result; } __syncthreads(); for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { size_t idx = start + tid; T s_g_d = softmax_grad_data[idx]; T s_d = softmax_data[idx]; dx_data[idx] = (s_g_d - shared_data) * s_d; } } } template <typename T> struct SequenceSoftmaxFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext &context, const 
LoDTensor &x, const phi::Vector<size_t> &ref_lod, /*referenced lod*/ LoDTensor *out) { int height = ref_lod.size() - 1; const int kThreadsPerBlock = 32; int thread_x = kThreadsPerBlock; int max_threads = context.GetMaxPhysicalThreadCount(); int max_blocks = ::max(max_threads / kThreadsPerBlock, 1); dim3 block_size(thread_x); dim3 grid_size(max_blocks); phi::MixVector<size_t> mixv_ref_lod(&ref_lod); hipLaunchKernelGGL(( sequence_softmax_kernel<T, kThreadsPerBlock>) , dim3(grid_size), dim3(block_size), 0, context.stream(), x.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, out->mutable_data<T>(context.GetPlace())); } }; template <typename T> struct SequenceSoftmaxGradFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext &context, const LoDTensor &dout, const LoDTensor &out, const phi::Vector<size_t> &ref_lod, /*referenced lod*/ LoDTensor *dx) { size_t height = ref_lod.size() - 1; const int kThreadsPerBlock = 32; int thread_x = kThreadsPerBlock; int max_threads = context.GetMaxPhysicalThreadCount(); int max_blocks = ::max(max_threads / kThreadsPerBlock, 1); dim3 block_size(thread_x); dim3 grid_size(max_blocks); phi::MixVector<size_t> mixv_ref_lod(&ref_lod); hipLaunchKernelGGL(( sequence_softmax_grad_kernel<T, kThreadsPerBlock>) , dim3(grid_size), dim3(block_size), 0, context.stream(), dout.data<T>(), out.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(sequence_softmax, ops::SequenceSoftmaxKernel<phi::GPUContext, float>, ops::SequenceSoftmaxKernel<phi::GPUContext, double>); REGISTER_OP_CUDA_KERNEL( sequence_softmax_grad, ops::SequenceSoftmaxGradKernel<phi::GPUContext, float>, ops::SequenceSoftmaxGradKernel<phi::GPUContext, double>);
1b9b47811840151631400c61c4a1c61186d215e8.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #ifdef __NVCC__ #include <cub/cub.cuh> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h" #include "paddle/phi/kernels/funcs/math.h" namespace paddle { namespace operators { using LoDTensor = phi::DenseTensor; template <typename T, int BlockDim> using BlockReduce = cub::BlockReduce<T, BlockDim>; template <typename T, int BlockDim> using BlockReduceTempStorage = typename BlockReduce<T, BlockDim>::TempStorage; template <typename T, int BlockDim> __global__ void sequence_softmax_kernel(const T *in_data, const size_t *ref_lod, const size_t src_hight, T *out_data) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; __shared__ T shared_max_data; __shared__ T shared_sum_data; for (int i = blockIdx.x; i < src_hight; i += gridDim.x) { size_t start = ref_lod[i]; size_t span = ref_lod[i + 1] - start; // Find the max ele T max_ele = -FLT_MAX; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; max_ele = max_ele > ele ? max_ele : ele; } max_ele = BlockReduce<T, BlockDim>(temp_storage).Reduce(max_ele, cub::Max()); if (threadIdx.x == 0) { shared_max_data = max_ele; } __syncthreads(); // sum T sum_data = 0; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; sum_data += phi::funcs::real_exp(ele - shared_max_data); } sum_data = BlockReduce<T, BlockDim>(temp_storage).Reduce(sum_data, cub::Sum()); if (threadIdx.x == 0) { shared_sum_data = sum_data; } __syncthreads(); // get final resit for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { T ele = in_data[start + tid]; ele = phi::funcs::real_exp(ele - shared_max_data) / shared_sum_data; out_data[start + tid] = ele; } } } template <typename T, int BlockDim> __global__ void sequence_softmax_grad_kernel(const T *softmax_grad_data, const T *softmax_data, const size_t *ref_lod, const size_t src_hight, T *dx_data) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; __shared__ T shared_data; for (int i = blockIdx.x; i < src_hight; i += gridDim.x) { size_t start = ref_lod[i]; size_t span = ref_lod[i + 1] - start; T result = 0; for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { size_t idx = start + tid; T s_g_d = softmax_grad_data[idx]; T s_d = softmax_data[idx]; result += s_g_d * s_d; } result = BlockReduce<T, BlockDim>(temp_storage).Reduce(result, cub::Sum()); if (threadIdx.x == 0) { shared_data = result; } __syncthreads(); for (int tid = threadIdx.x; tid < span; tid += blockDim.x) { size_t idx = start + tid; T s_g_d = softmax_grad_data[idx]; T s_d = softmax_data[idx]; dx_data[idx] = (s_g_d - shared_data) * s_d; } } } template <typename T> struct SequenceSoftmaxFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext &context, const LoDTensor &x, const phi::Vector<size_t> &ref_lod, /*referenced lod*/ LoDTensor *out) { int height = ref_lod.size() 
- 1; const int kThreadsPerBlock = 32; int thread_x = kThreadsPerBlock; int max_threads = context.GetMaxPhysicalThreadCount(); int max_blocks = std::max(max_threads / kThreadsPerBlock, 1); dim3 block_size(thread_x); dim3 grid_size(max_blocks); phi::MixVector<size_t> mixv_ref_lod(&ref_lod); sequence_softmax_kernel<T, kThreadsPerBlock> <<<grid_size, block_size, 0, context.stream()>>>( x.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, out->mutable_data<T>(context.GetPlace())); } }; template <typename T> struct SequenceSoftmaxGradFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext &context, const LoDTensor &dout, const LoDTensor &out, const phi::Vector<size_t> &ref_lod, /*referenced lod*/ LoDTensor *dx) { size_t height = ref_lod.size() - 1; const int kThreadsPerBlock = 32; int thread_x = kThreadsPerBlock; int max_threads = context.GetMaxPhysicalThreadCount(); int max_blocks = std::max(max_threads / kThreadsPerBlock, 1); dim3 block_size(thread_x); dim3 grid_size(max_blocks); phi::MixVector<size_t> mixv_ref_lod(&ref_lod); sequence_softmax_grad_kernel<T, kThreadsPerBlock> <<<grid_size, block_size, 0, context.stream()>>>( dout.data<T>(), out.data<T>(), mixv_ref_lod.CUDAData(context.GetPlace()), height, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(sequence_softmax, ops::SequenceSoftmaxKernel<phi::GPUContext, float>, ops::SequenceSoftmaxKernel<phi::GPUContext, double>); REGISTER_OP_CUDA_KERNEL( sequence_softmax_grad, ops::SequenceSoftmaxGradKernel<phi::GPUContext, float>, ops::SequenceSoftmaxGradKernel<phi::GPUContext, double>);
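Both kernels in this pair work per LoD segment: the forward pass computes a numerically stable softmax over each span [ref_lod[i], ref_lod[i+1]), i.e. y_j = exp(x_j - max) / sum_k exp(x_k - max), with cub::BlockReduce supplying the block-wide max and sum. The plain CPU reference below restates that forward computation for intuition; it is a sketch, not Paddle code, and the function name is made up.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Softmax applied independently to each segment [lod[i], lod[i+1]) of a flat array.
void sequence_softmax_cpu(const std::vector<float>& x,
                          const std::vector<size_t>& lod,
                          std::vector<float>& y) {
    for (size_t i = 0; i + 1 < lod.size(); ++i) {
        size_t start = lod[i], end = lod[i + 1];
        float mx = -1e30f;
        for (size_t j = start; j < end; ++j) mx = std::max(mx, x[j]);    // block-wide max in the kernel
        float sum = 0.f;
        for (size_t j = start; j < end; ++j) sum += std::exp(x[j] - mx); // block-wide sum in the kernel
        for (size_t j = start; j < end; ++j) y[j] = std::exp(x[j] - mx) / sum;
    }
}

int main() {
    std::vector<float> x = {1, 2, 3, 0, 0};
    std::vector<size_t> lod = {0, 3, 5};   // two sequences: lengths 3 and 2
    std::vector<float> y(x.size());
    sequence_softmax_cpu(x, lod, y);
    for (float v : y) printf("%.4f ", v);
    printf("\n");
    return 0;
}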
76732a782bc35d7609d1df93f6da70d0b563b8f9.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/core/cuda/common.hpp> #include <opencv2/cudaarithm.hpp> #include <opencv2/cudafilters.hpp> #include "stereo.hpp" #include "util_opencv.hpp" #include "dsi.hpp" #include "wta.hpp" #include "cost_aggregation.hpp" #include "aggregations/standard_sgm.hpp" #include "aggregations/adaptive_penalty.hpp" #include "median_filter.hpp" #include "costs/census.hpp" #include "costs/sad.hpp" #include "costs/dual.hpp" #include "costs/scale.hpp" #include <chrono> #include <iostream> static void timer_set() {} static void timer_print(const std::string &msg, const bool reset=true) {} using cv::Mat; using cv::Size; using ftl::stereo::aggregations::AdaptivePenaltySGM; static void variance_mask(cv::InputArray in, cv::OutputArray out, int wsize=3) { if (in.isGpuMat()) { cv::cuda::GpuMat im; cv::cuda::GpuMat im2; cv::cuda::GpuMat mean; cv::cuda::GpuMat mean2; mean.create(in.size(), CV_32FC1); mean2.create(in.size(), CV_32FC1); im2.create(in.size(), CV_32FC1); if (in.type() != CV_32FC1) { in.getGpuMat().convertTo(im, CV_32FC1); } else { im = in.getGpuMat(); } cv::cuda::multiply(im, im, im2); auto filter = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize)); filter->apply(im, mean); // E[X] filter->apply(im2, mean2); // E[X^2] cv::cuda::multiply(mean, mean, mean); // (E[X])^2 // NOTE: floating point accuracy in subtraction // (cv::cuda::createBoxFilter only supports 8 bit integer types) cv::cuda::subtract(mean2, mean, out.getGpuMatRef()); // E[X^2] - (E[X])^2 } else { throw std::exception(); /* todo CPU version */ } } typedef DualCostsWeighted<LinearCost<CensusMatchingCost>, ExpCost<SADMatchingCost>> MatchingCost; struct StereoWADCensus::Impl { CensusMatchingCost census; LinearCost<CensusMatchingCost> exp_census; SADMatchingCost sad; ExpCost<SADMatchingCost> exp_sad; Array2D<float> variance; Array2D<float> variance_r; MatchingCost cost; Array2D<float> penalty; Array2D<float> cost_min; Array2D<float> cost_min_paths; Array2D<float> uncertainty; Array2D<float> confidence; Array2D<float> disparity_r; Array2D<uchar> l; Array2D<uchar> r; Mat prior; // used only to calculate MI PathAggregator<AdaptivePenaltySGM<MatchingCost::DataType>> aggr; WinnerTakesAll<DSImageFloat,float> wta; Impl(int width, int height, int min_disp, int max_disp) : census(width, height, min_disp, max_disp), exp_census(width, height, min_disp, max_disp, census, 1.0), sad(width, height, min_disp, max_disp), exp_sad(width, height, min_disp, max_disp, sad, 1.0), variance(width, height), variance_r(width, height), cost(width, height, min_disp, max_disp, exp_census, exp_sad, variance, variance_r), penalty(width, height), cost_min(width, height), cost_min_paths(width, height), uncertainty(width, height), confidence(width, height), disparity_r(width, height), l(width, height), r(width, height) {} }; StereoWADCensus::StereoWADCensus() : impl_(nullptr) { impl_ = new Impl(0, 0, 0, 0); } void StereoWADCensus::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) { hipSetDevice(0); if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) { delete impl_; impl_ = nullptr; impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max); } mat2gray(l, impl_->l); mat2gray(r, impl_->r); timer_set(); cv::cuda::GpuMat var_l = impl_->variance.toGpuMat(); variance_mask(impl_->l.toGpuMat(), var_l, params.var_window); cv::cuda::GpuMat var_r = impl_->variance_r.toGpuMat(); 
variance_mask(impl_->r.toGpuMat(), var_r, params.var_window); cv::cuda::normalize(var_l, var_l, params.alpha, params.beta, cv::NORM_MINMAX, -1); cv::cuda::normalize(var_r, var_r, params.alpha, params.beta, cv::NORM_MINMAX, -1); impl_->census.set(impl_->l, impl_->r); impl_->exp_census.set(params.l1); impl_->sad.setWindow(params.wsize, params.wsize); impl_->sad.set(l, r); impl_->exp_sad.set(params.l2); impl_->cost.set(); if (params.debug) { timer_print("Matching cost"); } cudaSafeCall(hipDeviceSynchronize()); auto penalty = impl_->penalty.toGpuMat(); penalty.setTo(params.P2); AdaptivePenaltySGM<MatchingCost::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1}; impl_->aggr.getDirectionData(AggregationDirections::LEFTRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::RIGHTLEFT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::UPDOWN).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::DOWNUP).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::TOPLEFTBOTTOMRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::BOTTOMRIGHTTOPLEFT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::BOTTOMLEFTTOPRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::TOPRIGHTBOTTOMLEFT).penalties = impl_->penalty; auto &out = impl_->aggr(func, params.paths); cudaSafeCall(hipDeviceSynchronize()); if (params.debug) { timer_print("Aggregation"); } impl_->wta(out, params.subpixel); cudaSafeCall(hipDeviceSynchronize()); if (params.debug) { timer_print("WTA"); } median_filter(impl_->wta.disparity, disparity); if (params.debug) { timer_print("median filter"); } } StereoWADCensus::~StereoWADCensus() { if (impl_) { delete impl_; impl_ = nullptr; } }
76732a782bc35d7609d1df93f6da70d0b563b8f9.cu
#include <opencv2/core.hpp> #include <opencv2/imgproc.hpp> #include <opencv2/core/cuda/common.hpp> #include <opencv2/cudaarithm.hpp> #include <opencv2/cudafilters.hpp> #include "stereo.hpp" #include "util_opencv.hpp" #include "dsi.hpp" #include "wta.hpp" #include "cost_aggregation.hpp" #include "aggregations/standard_sgm.hpp" #include "aggregations/adaptive_penalty.hpp" #include "median_filter.hpp" #include "costs/census.hpp" #include "costs/sad.hpp" #include "costs/dual.hpp" #include "costs/scale.hpp" #include <chrono> #include <iostream> static void timer_set() {} static void timer_print(const std::string &msg, const bool reset=true) {} using cv::Mat; using cv::Size; using ftl::stereo::aggregations::AdaptivePenaltySGM; static void variance_mask(cv::InputArray in, cv::OutputArray out, int wsize=3) { if (in.isGpuMat()) { cv::cuda::GpuMat im; cv::cuda::GpuMat im2; cv::cuda::GpuMat mean; cv::cuda::GpuMat mean2; mean.create(in.size(), CV_32FC1); mean2.create(in.size(), CV_32FC1); im2.create(in.size(), CV_32FC1); if (in.type() != CV_32FC1) { in.getGpuMat().convertTo(im, CV_32FC1); } else { im = in.getGpuMat(); } cv::cuda::multiply(im, im, im2); auto filter = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize)); filter->apply(im, mean); // E[X] filter->apply(im2, mean2); // E[X^2] cv::cuda::multiply(mean, mean, mean); // (E[X])^2 // NOTE: floating point accuracy in subtraction // (cv::cuda::createBoxFilter only supports 8 bit integer types) cv::cuda::subtract(mean2, mean, out.getGpuMatRef()); // E[X^2] - (E[X])^2 } else { throw std::exception(); /* todo CPU version */ } } typedef DualCostsWeighted<LinearCost<CensusMatchingCost>, ExpCost<SADMatchingCost>> MatchingCost; struct StereoWADCensus::Impl { CensusMatchingCost census; LinearCost<CensusMatchingCost> exp_census; SADMatchingCost sad; ExpCost<SADMatchingCost> exp_sad; Array2D<float> variance; Array2D<float> variance_r; MatchingCost cost; Array2D<float> penalty; Array2D<float> cost_min; Array2D<float> cost_min_paths; Array2D<float> uncertainty; Array2D<float> confidence; Array2D<float> disparity_r; Array2D<uchar> l; Array2D<uchar> r; Mat prior; // used only to calculate MI PathAggregator<AdaptivePenaltySGM<MatchingCost::DataType>> aggr; WinnerTakesAll<DSImageFloat,float> wta; Impl(int width, int height, int min_disp, int max_disp) : census(width, height, min_disp, max_disp), exp_census(width, height, min_disp, max_disp, census, 1.0), sad(width, height, min_disp, max_disp), exp_sad(width, height, min_disp, max_disp, sad, 1.0), variance(width, height), variance_r(width, height), cost(width, height, min_disp, max_disp, exp_census, exp_sad, variance, variance_r), penalty(width, height), cost_min(width, height), cost_min_paths(width, height), uncertainty(width, height), confidence(width, height), disparity_r(width, height), l(width, height), r(width, height) {} }; StereoWADCensus::StereoWADCensus() : impl_(nullptr) { impl_ = new Impl(0, 0, 0, 0); } void StereoWADCensus::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) { cudaSetDevice(0); if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) { delete impl_; impl_ = nullptr; impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max); } mat2gray(l, impl_->l); mat2gray(r, impl_->r); timer_set(); cv::cuda::GpuMat var_l = impl_->variance.toGpuMat(); variance_mask(impl_->l.toGpuMat(), var_l, params.var_window); cv::cuda::GpuMat var_r = impl_->variance_r.toGpuMat(); variance_mask(impl_->r.toGpuMat(), var_r, params.var_window); 
cv::cuda::normalize(var_l, var_l, params.alpha, params.beta, cv::NORM_MINMAX, -1); cv::cuda::normalize(var_r, var_r, params.alpha, params.beta, cv::NORM_MINMAX, -1); impl_->census.set(impl_->l, impl_->r); impl_->exp_census.set(params.l1); impl_->sad.setWindow(params.wsize, params.wsize); impl_->sad.set(l, r); impl_->exp_sad.set(params.l2); impl_->cost.set(); if (params.debug) { timer_print("Matching cost"); } cudaSafeCall(cudaDeviceSynchronize()); auto penalty = impl_->penalty.toGpuMat(); penalty.setTo(params.P2); AdaptivePenaltySGM<MatchingCost::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1}; impl_->aggr.getDirectionData(AggregationDirections::LEFTRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::RIGHTLEFT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::UPDOWN).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::DOWNUP).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::TOPLEFTBOTTOMRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::BOTTOMRIGHTTOPLEFT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::BOTTOMLEFTTOPRIGHT).penalties = impl_->penalty; impl_->aggr.getDirectionData(AggregationDirections::TOPRIGHTBOTTOMLEFT).penalties = impl_->penalty; auto &out = impl_->aggr(func, params.paths); cudaSafeCall(cudaDeviceSynchronize()); if (params.debug) { timer_print("Aggregation"); } impl_->wta(out, params.subpixel); cudaSafeCall(cudaDeviceSynchronize()); if (params.debug) { timer_print("WTA"); } median_filter(impl_->wta.disparity, disparity); if (params.debug) { timer_print("median filter"); } } StereoWADCensus::~StereoWADCensus() { if (impl_) { delete impl_; impl_ = nullptr; } }
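variance_mask in the pair above computes a windowed variance as E[X^2] - (E[X])^2 using a box filter, and its CPU branch is still marked "todo". The sketch below is one possible CPU counterpart mirroring the same three steps with the standard cv::boxFilter/cv::multiply/cv::subtract calls; it is not code from this project, and the function name is illustrative.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Windowed variance: Var[X] = E[X^2] - (E[X])^2, both expectations over a wsize x wsize box.
static void variance_mask_cpu(const cv::Mat &in, cv::Mat &out, int wsize = 3) {
    cv::Mat im;
    in.convertTo(im, CV_32F);

    cv::Mat im2, mean, mean2;
    cv::multiply(im, im, im2);                                   // X^2
    cv::boxFilter(im,  mean,  CV_32F, cv::Size(wsize, wsize));   // E[X]
    cv::boxFilter(im2, mean2, CV_32F, cv::Size(wsize, wsize));   // E[X^2]
    cv::multiply(mean, mean, mean);                              // (E[X])^2
    cv::subtract(mean2, mean, out);                              // E[X^2] - (E[X])^2
}

int main() {
    cv::Mat img(64, 64, CV_8UC1);
    cv::randu(img, 0, 255);
    cv::Mat var;
    variance_mask_cpu(img, var, 3);
    return 0;
}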
dfdc18107787516d0d1cbb53a8db4ff43726729b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <math.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? 
(a) : (b) ) #endif const int block_size = 1024; const int DIM = 32; const int MAX_THREADS_PER_BLOCK = 65535; const int FIND_MAX_THREADS = 1024; //allocate to shared memory __global__ void findMax(unsigned int* const d_inputVals, unsigned int *d_collectMax, const size_t numElems) { __shared__ unsigned int s_inputVals[FIND_MAX_THREADS]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElems) s_inputVals[threadIdx.x] = d_inputVals[idx]; else s_inputVals[threadIdx.x] = 0; __syncthreads(); int half = FIND_MAX_THREADS / 2; while (half != 0) { if (threadIdx.x < half) { s_inputVals[threadIdx.x] = max(s_inputVals[threadIdx.x], s_inputVals[threadIdx.x + half]); } half /= 2; __syncthreads(); } d_collectMax[blockIdx.x] = s_inputVals[0]; } __global__ void scanSB(unsigned int* const d_inputVals, unsigned int *d_collectScan, unsigned int *d_collectSumScan, unsigned int *d_sumBlock, unsigned int pos, size_t const numElems, unsigned int compare, int numMaxBlock) { __shared__ unsigned int s_inputVals[FIND_MAX_THREADS]; __shared__ unsigned int s_inputValsTMP[FIND_MAX_THREADS]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElems){ s_inputVals[threadIdx.x] = (d_inputVals[idx] & pos) == compare; d_collectScan[idx] = s_inputVals[threadIdx.x]; } else s_inputVals[threadIdx.x] = 0; __syncthreads(); int dist = 1; int count = 0; while (dist < FIND_MAX_THREADS) { if (count % 2 == 0){ s_inputValsTMP[threadIdx.x] = s_inputVals[threadIdx.x]; if (threadIdx.x >= dist) { s_inputValsTMP[threadIdx.x] += s_inputVals[threadIdx.x - dist]; } } else { s_inputVals[threadIdx.x] = s_inputValsTMP[threadIdx.x]; if (threadIdx.x >= dist) { s_inputVals[threadIdx.x] += s_inputValsTMP[threadIdx.x - dist]; } } dist *= 2; count++; __syncthreads(); } if (count % 2 == 0){ d_collectSumScan[idx] = s_inputVals[threadIdx.x]; d_sumBlock[blockIdx.x] = s_inputVals[FIND_MAX_THREADS - 1]; } else { d_collectSumScan[idx] = s_inputValsTMP[threadIdx.x]; d_sumBlock[blockIdx.x] = s_inputValsTMP[FIND_MAX_THREADS - 1]; } } __global__ void reduceBlockSum(unsigned int *d_sumBlock, const size_t numMaxBlock) { __shared__ unsigned int s_sumBlock[FIND_MAX_THREADS]; __shared__ unsigned int s_sumBlockTMP[FIND_MAX_THREADS]; int idx = threadIdx.x; if(idx >= numMaxBlock) return; s_sumBlock[idx] = d_sumBlock[idx]; __syncthreads(); int dist = 1; int count = 0; while (dist < numMaxBlock) { if(count % 2 == 0){ s_sumBlockTMP[idx] = s_sumBlock[idx]; if (idx >= dist) { s_sumBlockTMP[idx] += s_sumBlock[idx - dist]; } } else { s_sumBlock[idx] = s_sumBlockTMP[idx]; if (idx >= dist) { s_sumBlock[idx] += s_sumBlockTMP[idx - dist]; } } dist *= 2; count++; __syncthreads(); } if (count % 2 == 0){ if(idx < numMaxBlock) d_sumBlock[idx + 1] = s_sumBlock[idx]; //else d_sumBlock[0] = 0; } else { if(idx < numMaxBlock) d_sumBlock[idx + 1] = s_sumBlockTMP[idx]; //else d_sumBlock[0] = 0; } } __global__ void mergeScan(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int *d_collectScan, unsigned int *d_collectSumScan, unsigned int *d_sumBlock, unsigned int *d_interVals, unsigned int *d_interPos, unsigned int offset) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (d_collectScan[idx]==0) return; d_interVals[d_collectSumScan[idx] + d_sumBlock[blockIdx.x] + offset - 1] = d_inputVals[idx]; d_interPos[d_collectSumScan[idx] + d_sumBlock[blockIdx.x] + offset - 1] = d_inputPos[idx]; } __global__ void copyData(unsigned int* const d_inputVals, unsigned int *d_interVals, size_t const numElems) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if 
(idx >= numElems) return; d_inputVals[idx] = d_interVals[idx]; } void print_debug(unsigned int* const d_inputVals, unsigned int *d_interVals, unsigned int *d_collectScan, unsigned int *d_collectSumScan, unsigned int *d_sumBlock) { printf("Cut"); for(int i = 0; i < 10; i++) { printf("%d ", d_inputVals[i]); } printf("\n"); for(int i = 0; i < 10; i++) { printf("%d ", d_interVals[i]); } printf("\n"); for(int i = 0; i < 10; i++) { printf("%d ", d_collectScan[i]); } printf("\n"); for(int i = 0; i < 10; i++) { printf("%d ", d_collectSumScan[i]); } printf("\n"); for(int i = 0; i < 10; i++) { printf("%d ", d_sumBlock[i]); } printf("\n"); } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { //TODO // P.1 search for maximum //printf("hello"); unsigned int *d_collectMax; int numMaxBlock = (numElems + FIND_MAX_THREADS - 1)/FIND_MAX_THREADS; checkCudaErrors(hipMallocManaged(&d_collectMax, sizeof(unsigned int) * numMaxBlock)); printf("\n %d %d", numMaxBlock, FIND_MAX_THREADS); hipLaunchKernelGGL(( findMax) , dim3(numMaxBlock),dim3(FIND_MAX_THREADS), 0, 0, d_inputVals, d_collectMax, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( findMax) , dim3(1), dim3(numMaxBlock), 0, 0, d_collectMax, d_collectMax, numMaxBlock); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); unsigned int MAX = d_collectMax[0]; printf("max %d \n", MAX); checkCudaErrors(hipFree(d_collectMax)); // P.2 Scan and Compact int N = log2(MAX); unsigned int MSB = 1; unsigned int *d_collectSumScan, *d_interVals, *d_interPos, *d_sumBlock; unsigned int *d_collectScan; unsigned int h_inputVals[numElems]; checkCudaErrors(hipMallocManaged(&d_collectSumScan, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMallocManaged(&d_collectScan, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMallocManaged(&d_interVals, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMallocManaged(&d_interPos, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMallocManaged(&d_sumBlock, sizeof(unsigned int) * (numMaxBlock+1))); for (int i = 0; i < N; ++i) { /* 1. Predict & Scan through each block 2. Reduce sum for each block 3. 
compact elements by merging all block */ checkCudaErrors(hipMemcpy(h_inputVals, d_inputVals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( scanSB), dim3(numMaxBlock),dim3(FIND_MAX_THREADS), 0, 0, d_inputVals, d_collectScan, d_collectSumScan, d_sumBlock, MSB, numElems, 0, numMaxBlock); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( reduceBlockSum), dim3(1),dim3(numMaxBlock), 0, 0, d_sumBlock, numMaxBlock); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); d_sumBlock[0] = 0; hipLaunchKernelGGL(( mergeScan), dim3(numMaxBlock), dim3(FIND_MAX_THREADS), 0, 0, d_inputVals, d_inputPos, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, d_interPos, 0); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //printf("%d\n", d_collectScan[2]); //printf("%d\n", d_inputVals[2]); int offset = d_sumBlock[numMaxBlock]; //printf("%d\n", offset); hipLaunchKernelGGL(( scanSB), dim3(numMaxBlock),dim3(FIND_MAX_THREADS), 0, 0, d_inputVals, d_collectScan, d_collectSumScan, d_sumBlock, MSB, numElems, MSB, numMaxBlock); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( reduceBlockSum), dim3(1),dim3(numMaxBlock), 0, 0, d_sumBlock, numMaxBlock); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); d_sumBlock[0] = 0; hipLaunchKernelGGL(( mergeScan), dim3(numMaxBlock), dim3(FIND_MAX_THREADS), 0, 0, d_inputVals, d_inputPos, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, d_interPos, offset); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //printf("Cut"); printf("%d ", d_interVals[numElems - 1]); //printf("\n"); //printf("%d \n", d_interVals[numElems - 2]); //if(i == 0) print_debug(h_inputVals, d_interVals, d_collectScan, d_collectSumScan, d_sumBlock); checkCudaErrors(hipMemcpy(d_inputPos, d_interPos, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_inputVals, d_interVals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice)); MSB *= 2; } checkCudaErrors(hipMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice)); //PUT YOUR SORT HERE checkCudaErrors(hipFree(d_collectSumScan)); checkCudaErrors(hipFree(d_collectScan)); checkCudaErrors(hipFree(d_interVals)); checkCudaErrors(hipFree(d_interPos)); checkCudaErrors(hipFree(d_sumBlock)); }
dfdc18107787516d0d1cbb53a8db4ff43726729b.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include <math.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif const int block_size = 1024; const int DIM = 32; const int MAX_THREADS_PER_BLOCK = 65535; const int FIND_MAX_THREADS = 1024; //allocate to shared memory __global__ void findMax(unsigned int* const d_inputVals, unsigned int *d_collectMax, const size_t numElems) { __shared__ unsigned int s_inputVals[FIND_MAX_THREADS]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElems) s_inputVals[threadIdx.x] = d_inputVals[idx]; else s_inputVals[threadIdx.x] = 0; __syncthreads(); int half = FIND_MAX_THREADS / 2; while (half != 0) { if (threadIdx.x < half) { s_inputVals[threadIdx.x] = max(s_inputVals[threadIdx.x], s_inputVals[threadIdx.x + half]); } half /= 2; __syncthreads(); } d_collectMax[blockIdx.x] = s_inputVals[0]; } __global__ void scanSB(unsigned int* const d_inputVals, unsigned int *d_collectScan, unsigned int *d_collectSumScan, unsigned int *d_sumBlock, unsigned int pos, size_t const numElems, unsigned int compare, int numMaxBlock) { __shared__ unsigned int s_inputVals[FIND_MAX_THREADS]; __shared__ unsigned int s_inputValsTMP[FIND_MAX_THREADS]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElems){ s_inputVals[threadIdx.x] = (d_inputVals[idx] & pos) == compare; d_collectScan[idx] = s_inputVals[threadIdx.x]; } else s_inputVals[threadIdx.x] = 0; __syncthreads(); int dist = 1; int count = 0; while (dist < FIND_MAX_THREADS) { if (count % 2 == 0){ s_inputValsTMP[threadIdx.x] = s_inputVals[threadIdx.x]; if (threadIdx.x >= dist) { s_inputValsTMP[threadIdx.x] += s_inputVals[threadIdx.x - dist]; } } else { s_inputVals[threadIdx.x] = s_inputValsTMP[threadIdx.x]; if (threadIdx.x >= dist) { s_inputVals[threadIdx.x] += s_inputValsTMP[threadIdx.x - dist]; } } dist *= 2; count++; __syncthreads(); } if (count % 2 == 0){ d_collectSumScan[idx] = s_inputVals[threadIdx.x]; d_sumBlock[blockIdx.x] = 
        s_inputVals[FIND_MAX_THREADS - 1];
    }
    else {
        d_collectSumScan[idx] = s_inputValsTMP[threadIdx.x];
        d_sumBlock[blockIdx.x] = s_inputValsTMP[FIND_MAX_THREADS - 1];
    }
}

__global__ void reduceBlockSum(unsigned int *d_sumBlock, const size_t numMaxBlock)
{
    __shared__ unsigned int s_sumBlock[FIND_MAX_THREADS];
    __shared__ unsigned int s_sumBlockTMP[FIND_MAX_THREADS];

    int idx = threadIdx.x;
    if (idx >= numMaxBlock) return;

    s_sumBlock[idx] = d_sumBlock[idx];
    __syncthreads();

    int dist = 1;
    int count = 0;
    while (dist < numMaxBlock) {
        if (count % 2 == 0) {
            s_sumBlockTMP[idx] = s_sumBlock[idx];
            if (idx >= dist) {
                s_sumBlockTMP[idx] += s_sumBlock[idx - dist];
            }
        }
        else {
            s_sumBlock[idx] = s_sumBlockTMP[idx];
            if (idx >= dist) {
                s_sumBlock[idx] += s_sumBlockTMP[idx - dist];
            }
        }
        dist *= 2;
        count++;
        __syncthreads();
    }

    if (count % 2 == 0) {
        if (idx < numMaxBlock) d_sumBlock[idx + 1] = s_sumBlock[idx];
        //else d_sumBlock[0] = 0;
    }
    else {
        if (idx < numMaxBlock) d_sumBlock[idx + 1] = s_sumBlockTMP[idx];
        //else d_sumBlock[0] = 0;
    }
}

__global__ void mergeScan(unsigned int* const d_inputVals,
                          unsigned int* const d_inputPos,
                          unsigned int *d_collectScan,
                          unsigned int *d_collectSumScan,
                          unsigned int *d_sumBlock,
                          unsigned int *d_interVals,
                          unsigned int *d_interPos,
                          unsigned int offset)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (d_collectScan[idx] == 0) return;

    d_interVals[d_collectSumScan[idx] + d_sumBlock[blockIdx.x] + offset - 1] = d_inputVals[idx];
    d_interPos[d_collectSumScan[idx] + d_sumBlock[blockIdx.x] + offset - 1] = d_inputPos[idx];
}

__global__ void copyData(unsigned int* const d_inputVals,
                         unsigned int *d_interVals,
                         size_t const numElems)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElems) return;
    d_inputVals[idx] = d_interVals[idx];
}

void print_debug(unsigned int* const d_inputVals,
                 unsigned int *d_interVals,
                 unsigned int *d_collectScan,
                 unsigned int *d_collectSumScan,
                 unsigned int *d_sumBlock)
{
    printf("Cut");
    for (int i = 0; i < 10; i++) {
        printf("%d ", d_inputVals[i]);
    }
    printf("\n");
    for (int i = 0; i < 10; i++) {
        printf("%d ", d_interVals[i]);
    }
    printf("\n");
    for (int i = 0; i < 10; i++) {
        printf("%d ", d_collectScan[i]);
    }
    printf("\n");
    for (int i = 0; i < 10; i++) {
        printf("%d ", d_collectSumScan[i]);
    }
    printf("\n");
    for (int i = 0; i < 10; i++) {
        printf("%d ", d_sumBlock[i]);
    }
    printf("\n");
}

void your_sort(unsigned int* const d_inputVals,
               unsigned int* const d_inputPos,
               unsigned int* const d_outputVals,
               unsigned int* const d_outputPos,
               const size_t numElems)
{
    //TODO
    // P.1 search for maximum
    //printf("hello");
    unsigned int *d_collectMax;
    int numMaxBlock = (numElems + FIND_MAX_THREADS - 1) / FIND_MAX_THREADS;
    checkCudaErrors(cudaMallocManaged(&d_collectMax, sizeof(unsigned int) * numMaxBlock));
    printf("\n %d %d", numMaxBlock, FIND_MAX_THREADS);

    findMax<<<numMaxBlock, FIND_MAX_THREADS>>>(d_inputVals, d_collectMax, numElems);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());

    findMax<<<1, numMaxBlock>>>(d_collectMax, d_collectMax, numMaxBlock);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());

    unsigned int MAX = d_collectMax[0];
    printf("max %d \n", MAX);
    checkCudaErrors(cudaFree(d_collectMax));

    // P.2 Scan and Compact
    int N = log2(MAX);
    unsigned int MSB = 1;
    unsigned int *d_collectSumScan, *d_interVals, *d_interPos, *d_sumBlock;
    unsigned int *d_collectScan;
    unsigned int h_inputVals[numElems];
    checkCudaErrors(cudaMallocManaged(&d_collectSumScan, sizeof(unsigned int) * numElems));
    checkCudaErrors(cudaMallocManaged(&d_collectScan, sizeof(unsigned int) * numElems));
    checkCudaErrors(cudaMallocManaged(&d_interVals, sizeof(unsigned int) * numElems));
    checkCudaErrors(cudaMallocManaged(&d_interPos, sizeof(unsigned int) * numElems));
    checkCudaErrors(cudaMallocManaged(&d_sumBlock, sizeof(unsigned int) * (numMaxBlock + 1)));

    for (int i = 0; i < N; ++i) {
        /*
           1. Predict & Scan through each block
           2. Reduce sum for each block
           3. compact elements by merging all block
        */
        checkCudaErrors(cudaMemcpy(h_inputVals, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost));

        scanSB<<<numMaxBlock, FIND_MAX_THREADS>>>(d_inputVals, d_collectScan, d_collectSumScan, d_sumBlock, MSB, numElems, 0, numMaxBlock);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());

        reduceBlockSum<<<1, numMaxBlock>>>(d_sumBlock, numMaxBlock);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        d_sumBlock[0] = 0;

        mergeScan<<<numMaxBlock, FIND_MAX_THREADS>>>(d_inputVals, d_inputPos, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, d_interPos, 0);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        //printf("%d\n", d_collectScan[2]);
        //printf("%d\n", d_inputVals[2]);

        int offset = d_sumBlock[numMaxBlock];
        //printf("%d\n", offset);

        scanSB<<<numMaxBlock, FIND_MAX_THREADS>>>(d_inputVals, d_collectScan, d_collectSumScan, d_sumBlock, MSB, numElems, MSB, numMaxBlock);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());

        reduceBlockSum<<<1, numMaxBlock>>>(d_sumBlock, numMaxBlock);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        d_sumBlock[0] = 0;

        mergeScan<<<numMaxBlock, FIND_MAX_THREADS>>>(d_inputVals, d_inputPos, d_collectScan, d_collectSumScan, d_sumBlock, d_interVals, d_interPos, offset);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());

        //printf("Cut");
        printf("%d ", d_interVals[numElems - 1]);
        //printf("\n");
        //printf("%d \n", d_interVals[numElems - 2]);
        //if(i == 0) print_debug(h_inputVals, d_interVals, d_collectScan, d_collectSumScan, d_sumBlock);

        checkCudaErrors(cudaMemcpy(d_inputPos, d_interPos, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));
        checkCudaErrors(cudaMemcpy(d_inputVals, d_interVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));

        MSB *= 2;
    }

    checkCudaErrors(cudaMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));

    //PUT YOUR SORT HERE

    checkCudaErrors(cudaFree(d_collectSumScan));
    checkCudaErrors(cudaFree(d_collectScan));
    checkCudaErrors(cudaFree(d_interVals));
    checkCudaErrors(cudaFree(d_interPos));
    checkCudaErrors(cudaFree(d_sumBlock));
}
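The file above implements an LSD radix sort: for each bit it predicates, scans, and compacts the key/position pairs, first collecting elements whose current bit is 0 and then, after an offset, those whose bit is 1. As a minimal host-side reference for checking that ordering, the sketch below performs one stable partition-by-bit pass on the CPU; the helper name referenceBitPass and the use of std::vector are illustrative additions, not part of the dataset entry above.

#include <vector>

// One LSD radix pass on the host: stable partition of key/position pairs by a
// single bit, zeros first and then ones -- the same ordering the scan/compact
// kernels above produce for each value of MSB.
static void referenceBitPass(std::vector<unsigned int>& vals,
                             std::vector<unsigned int>& pos,
                             unsigned int mask)
{
    std::vector<unsigned int> outVals(vals.size()), outPos(pos.size());
    size_t write = 0;
    for (size_t i = 0; i < vals.size(); ++i)          // elements with the bit clear
        if ((vals[i] & mask) == 0) { outVals[write] = vals[i]; outPos[write] = pos[i]; ++write; }
    for (size_t i = 0; i < vals.size(); ++i)          // then elements with the bit set
        if ((vals[i] & mask) != 0) { outVals[write] = vals[i]; outPos[write] = pos[i]; ++write; }
    vals.swap(outVals);
    pos.swap(outPos);
}

Running this pass for mask = 1, 2, 4, ... reproduces the order the device path should leave in d_outputVals/d_outputPos after each iteration of the loop above.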
88b6d7b1e9049dc8e62526995318c411d13c7461.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" namespace cv { namespace gpu { namespace cudev { namespace imgproc { /////////////////////////////////////////// Corner Harris ///////////////////////////////////////////////// texture<float, hipTextureType2D, hipReadModeElementType> harrisDxTex(0, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> harrisDyTex(0, hipFilterModePoint, hipAddressModeClamp); __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(harrisDxTex, j, i); float dy = tex2D(harrisDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } template <typename BR, typename BC> __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { const int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { const int x = border_row.idx_col(j); float dx = tex2D(harrisDxTex, x, y); float dy = tex2D(harrisDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&harrisDxTex, Dx); bindTexture(&harrisDyTex, Dy); switch (border_type) { case BORDER_REFLECT101: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst); break; } cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////// Corner Min Eigen Val ///////////////////////////////////////////////// texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDxTex(0, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDyTex(0, hipFilterModePoint, hipAddressModeClamp); __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst) { const int x = 
blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(minEigenValDxTex, j, i); float dy = tex2D(minEigenValDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } template <typename BR, typename BC> __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { int x = border_row.idx_col(j); float dx = tex2D(minEigenValDxTex, x, y); float dy = tex2D(minEigenValDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&minEigenValDxTex, Dx); bindTexture(&minEigenValDyTex, Dy); switch (border_type) { case BORDER_REFLECT101: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst); break; } cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } } }}} #endif
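The .hip entry above is the hipify-generated counterpart of the .cu entry that follows: aside from header and type renames (hipTextureType2D, hipStream_t, hipDeviceSynchronize, ...), the main rewrite is that triple-chevron kernel launches become hipLaunchKernelGGL calls. The sketch below shows the two launch spellings on a tiny illustrative kernel; scaleKernel, grid, block, and stream are hypothetical names, not taken from the OpenCV sources.

// Illustration of the launch rewrite performed by hipify.
// CUDA form, as in the .cu entry below:
//   scaleKernel<<<grid, block, 0, stream>>>(dst, n, 2.0f);
// HIP form, as emitted in the .hip entry above:
//   hipLaunchKernelGGL((scaleKernel), dim3(grid), dim3(block), 0, stream, dst, n, 2.0f);
__global__ void scaleKernel(float* dst, int n, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] *= s;
}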
88b6d7b1e9049dc8e62526995318c411d13c7461.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" namespace cv { namespace gpu { namespace cudev { namespace imgproc { /////////////////////////////////////////// Corner Harris ///////////////////////////////////////////////// texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDyTex(0, cudaFilterModePoint, cudaAddressModeClamp); __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(harrisDxTex, j, i); float dy = tex2D(harrisDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } template <typename BR, typename BC> __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { const int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { const int x = border_row.idx_col(j); float dx = tex2D(harrisDxTex, x, y); float dy = tex2D(harrisDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&harrisDxTex, Dx); bindTexture(&harrisDyTex, Dy); switch (border_type) { case BORDER_REFLECT101: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////// Corner Min Eigen Val ///////////////////////////////////////////////// texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDxTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDyTex(0, cudaFilterModePoint, cudaAddressModeClamp); __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + 
threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(minEigenValDxTex, j, i); float dy = tex2D(minEigenValDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } template <typename BR, typename BC> __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { int x = border_row.idx_col(j); float dx = tex2D(minEigenValDxTex, x, y); float dy = tex2D(minEigenValDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&minEigenValDxTex, Dx); bindTexture(&minEigenValDyTex, Dy); switch (border_type) { case BORDER_REFLECT101: cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT: cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE: cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } } }}} #endif
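Both kernels in the pair above accumulate the 2x2 structure tensor M = [[a, b], [b, c]] over a block_size window of the Sobel derivatives: cornerHarris writes the Harris response det(M) - k*trace(M)^2 = a*c - b*b - k*(a+c)^2, while cornerMinEigenVal writes the smaller eigenvalue of M, with the 0.5 factors folding the usual halving into a and c. The scalar sketch below restates the two per-pixel responses; the helper names are illustrative, not part of the OpenCV sources.

#include <cmath>

// Per-pixel responses, given the window sums a = sum(dx*dx), b = sum(dx*dy), c = sum(dy*dy).
static float harrisResponse(float a, float b, float c, float k)
{
    return a * c - b * b - k * (a + c) * (a + c);    // det(M) - k * trace(M)^2
}

static float minEigenValResponse(float a, float b, float c)
{
    a *= 0.5f;                                       // same folding as the kernel
    c *= 0.5f;
    return (a + c) - std::sqrt((a - c) * (a - c) + b * b);
}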
b8f221ccd8a08639afdc8f29d1aaff150d97425b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/native/ConvUtils.h> #include <algorithm> #include <tuple> #include <limits> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t, int kKnownKernelT, int kKnownKernelH, int kKnownKernelW, int kKnownDilationT, int kKnownDilationH, int kKnownDilationW> __global__ void conv_depthwise3d_cuda_kernel( const PackedTensorAccessor32<scalar_t, 5> input, PackedTensorAccessor32<scalar_t, 5> output, const PackedTensorAccessor32<scalar_t, 5> kernel, const scalar_t* bias, int strideT, int strideH, int strideW, int paddingT, int paddingH, int paddingW, int dilationT_, int dilationH_, int dilationW_) { const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2); const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3); const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4); const int oC = output.size(1); const int oT = output.size(2); const int oH = output.size(3); const int oW = output.size(4); const int iC = input.size(1); const int iT = input.size(2); const int iH = input.size(3); const int iW = input.size(4); const int channel_multiplier = oC / iC; const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_; const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_; const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_; const int num_output = output.size(0) * output.stride(0); CUDA_KERNEL_LOOP(index, num_output) { const int out_col = index % oW; const int out_row = (index / oW) % oH; const int out_frame = (index / oW / oH) % oT; const int out_channel = (index / oW / oH / oT) % oC; const int batch = index / oW / oH / oT / oC; const int in_channel = out_channel / channel_multiplier; const int in_col_start = out_col * strideW - paddingW; const int in_row_start = out_row * strideH - paddingH; const int in_frame_start = out_frame * strideT - paddingT; accscalar_t sum = 0; const scalar_t *kernel_ptr = kernel[out_channel].data(); const scalar_t *input_ptr = &input[batch][in_channel][in_frame_start][in_row_start][in_col_start]; for (int k_frame = 0; k_frame < kT; ++k_frame) { const int in_frame = in_frame_start + k_frame * dilationT; for (int k_row = 0; k_row < kH; ++k_row) { const int in_row = in_row_start + k_row * dilationH; for (int k_col = 0; k_col < kW; ++k_col) { const accscalar_t op1 = *(kernel_ptr++); const int in_col = in_col_start + k_col * dilationW; if (in_frame >= 0 && in_row >= 0 && in_col >= 0 && in_frame < iT && in_row < iH && in_col < iW) { sum += op1 * *(input_ptr); } input_ptr += dilationW; } input_ptr += iW * dilationH - kW * dilationW; } input_ptr += iW * (iH * dilationT - kH * dilationH); } if (bias != NULL) { sum += bias[out_channel]; } output[batch][out_channel][out_frame][out_row][out_col] = sum; } } template <typename scalar_t, typename accscalar_t, int kKnownKernelT, int kKnownKernelH, int kKnownKernelW, int kKnownDilationT, int kKnownDilationH, int kKnownDilationW, int kKnownStrideT, int kKnownStrideH, int kKnownStrideW> __global__ void conv_depthwise3d_cuda_backward_input_kernel( const PackedTensorAccessor32<scalar_t, 5> grad_output, PackedTensorAccessor32<scalar_t, 5> grad_input, const PackedTensorAccessor32<scalar_t, 5> kernel, int strideT_, int strideH_, int strideW_, int paddingT, int paddingH, int 
paddingW, int dilationT_, int dilationH_, int dilationW_) { const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2); const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3); const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4); const int oC = grad_output.size(1); const int oT = grad_output.size(2); const int oH = grad_output.size(3); const int oW = grad_output.size(4); const int iC = grad_input.size(1); const int iT = grad_input.size(2); const int iH = grad_input.size(3); const int iW = grad_input.size(4); const int channel_multiplier = oC / iC; const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_; const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_; const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_; const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_; const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_; const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_; const int num_input = grad_input.size(0) * grad_input.stride(0); CUDA_KERNEL_LOOP(index, num_input) { const int in_col = index % iW; const int in_row = (index / iW) % iH; const int in_frame = (index / iW / iH) % iT; const int in_channel = (index / iW / iH / iT) % iC; const int batch = index / iW / iH / iT / iC; const int out_col_end = in_col + paddingW; const int out_row_end = in_row + paddingH; const int out_frame_end = in_frame + paddingT; const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data(); accscalar_t sum = 0; for (int k_chn = in_channel * channel_multiplier; k_chn < (in_channel + 1) * channel_multiplier; ++k_chn) { const scalar_t* gout_ptr = grad_output[batch][k_chn].data(); for (int k_frame = 0; k_frame < kT; ++k_frame) { const int out_frame_raw = out_frame_end - k_frame * dilationT; const int out_frame = out_frame_raw / strideT; for (int k_row = 0; k_row < kH; ++k_row) { const int out_row_raw = out_row_end - k_row * dilationH; const int out_row = out_row_raw / strideH; for (int k_col = 0; k_col < kW; ++k_col) { const accscalar_t op1 = *(kernel_ptr++); const int out_col_raw = out_col_end - k_col * dilationW; const int out_col = out_col_raw / strideW; const int out_offs = (out_frame * oH + out_row) * oW + out_col; accscalar_t op2 = (accscalar_t)0; if (out_col >= 0 && out_row >= 0 && out_frame >= 0 && out_col < oW && out_row < oH && out_frame < oT) { op2 = *(gout_ptr + out_offs); } if (out_frame * strideT == out_frame_raw && out_row * strideH == out_row_raw && out_col * strideW == out_col_raw) { sum += op1 * op2; } } } } } grad_input[batch][in_channel][in_frame][in_row][in_col] = sum; } } template <typename scalar_t, typename accscalar_t, int kKnownStrideH, int kKnownStrideW> __global__ void conv_depthwise3d_cuda_backward_weight_kernel( const PackedTensorAccessor32<scalar_t, 5> grad_output, const PackedTensorAccessor32<scalar_t, 5> input, PackedTensorAccessor32<scalar_t, 5> grad_kernel, int strideT, int strideH_, int strideW_, int paddingT, int paddingH, int paddingW, int dilationT, int dilationH, int dilationW) { const int kC = grad_kernel.size(0); const int kT = grad_kernel.size(2); const int kH = grad_kernel.size(3); const int kW = grad_kernel.size(4); const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_; const int strideW = kKnownStrideW > 0 ? 
kKnownStrideW : strideW_; const int k_col = blockIdx.x % kW; const int k_row = (blockIdx.x / kW) % kH; const int k_frame = (blockIdx.x / kW / kH) % kT; const int k_channel = blockIdx.x / kW / kH / kT; scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col]; const int oT = grad_output.size(2); const int oH = grad_output.size(3); const int oW = grad_output.size(4); const int iT = input.size(2); const int iH = input.size(3); const int iW = input.size(4); const int channel_multiplier = grad_output.size(1) / input.size(1); const int in_channel = k_channel / channel_multiplier; extern __shared__ int sdata_raw[]; scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw); if (k_channel >= kC) { return; } const int laneid = threadIdx.x % C10_WARP_SIZE; const int warpid = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; accscalar_t grad = 0; int batch = warpid / oT; int gout_frame = warpid - batch * oT; for (int outer_pos = warpid; outer_pos < input.size(0) * oT; outer_pos += nwarps, gout_frame += nwarps) { while (gout_frame >= oT) { gout_frame -= oT; batch ++; } const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT; if (in_frame < 0 || in_frame >= iT) { continue; } const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid; const scalar_t* input_ptr = input[batch][in_channel][in_frame].data(); int gout_row = laneid / oW; int gout_col = laneid - gout_row * oW; for (; gout_row < oH; ) { const accscalar_t op1 = *(gout_ptr); gout_ptr += C10_WARP_SIZE; const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW; const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH; const int in_pos = in_row * iW + in_col; accscalar_t op2 = (accscalar_t)0; if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) { op2 = *(input_ptr + in_pos); } gout_col += C10_WARP_SIZE; while (gout_col >= oW) { gout_col -= oW; gout_row ++; } grad += op1 * op2; } } sdata[threadIdx.x] = grad; __syncthreads(); CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1); #pragma unroll for (int i = blockDim.x / 2; i >= 1; i >>= 1) { if (threadIdx.x < i) { sdata[threadIdx.x] += sdata[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = sdata[0]; } } template <int dim> void conv_depthwise_shape_check( const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { TORCH_CHECK(kernel_size.size() == dim, "kernel size length should be ", dim, ", but got ", kernel_size.size()); TORCH_CHECK(stride.size() == dim, "stride length should be ", dim, ", but got ", stride.size()); TORCH_CHECK(padding.size() == dim, "padding length should be ", dim, ", but got ", padding.size()); TORCH_CHECK(dilation.size() == dim, "dilation length should be ", dim, ", but got ", dilation.size()); TORCH_CHECK(weight.defined(), "Weight must be defined."); TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2, "Input dimension should be ", dim + 1, "D or ", dim + 2, "D, got ", input.dim(), "D"); TORCH_CHECK(weight.dim() == dim + 2, "Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D"); TORCH_CHECK(weight.size(1) == 1, "Depthwise weight should have in_channels=1, got ", weight.size(1)); TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0, "Depthwise out channels should be a multiple of in channels, got ", weight.size(0), " and ", input.size(-dim - 1)); for (int i = 0; i < dim; ++i) { 
TORCH_CHECK(weight.size(i + 2) == kernel_size[i], "kernel size and weight size mismatch, got ", kernel_size, " and ", weight.sizes()); TORCH_CHECK(stride[i] >= 1, "stride should be at least 1, got ", stride); TORCH_CHECK(padding[i] >= 0, "padding should be non-negative, got ", padding); TORCH_CHECK(dilation[i] >= 1, "dilation should be at least 1, got ", dilation); } if (bias.defined()) { TORCH_CHECK(bias.dim() == 1, "Bias should be 1D tensor, got ", bias.dim(), "D"); TORCH_CHECK(bias.size(0) == weight.size(0), "Bias length should be equal to out_channels, got ", bias.size(0), " and ", weight.size(0)); } if (grad_output.defined()) { auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(), padding, stride, dilation); TORCH_CHECK(grad_output.dim() == expected_output_size.size(), "Expect grad_output to be ", expected_output_size.size(), "D, got ", grad_output.dim(), "D."); for (int i = 0; i < grad_output.dim(); ++i) { TORCH_CHECK(grad_output.size(i) == expected_output_size[i], "Expect grad_output to be of same shape as output, got ", grad_output.size(i), " and ", expected_output_size[i], " at dimension ", i); } } } } #define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y)) #define NODEF_OR_EQUAL_3(x, y1, y2, y3) \ (NODEF_OR_EQUAL(x[0], y1) && \ NODEF_OR_EQUAL(x[1], y2) && \ NODEF_OR_EQUAL(x[2], y3)) #define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \ if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \ NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \ <scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)>) \ , dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ input_.packed_accessor32<scalar_t, 5>(), \ output_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ bias_ptr, \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_FORWARD_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_kernel \ <scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1>) \ , dim3(grid), dim3(block), (smem), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ input_.packed_accessor32<scalar_t, 5>(), \ output_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ bias_ptr, \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } Tensor conv_depthwise3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device."); if (bias.defined()) { TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device."); } conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */, kernel_size, stride, padding, dilation); Tensor input_ = input.contiguous(); if (input.dim() == 4 /* no batch */) { input_ = input.unsqueeze(0); } auto output_size = 
conv_output_size(input_.sizes(), weight.sizes(), padding, stride, dilation); for (size_t i = 0; i < output_size.size(); ++i) { TORCH_CHECK(output_size[i] > 0, "Output size should be positive, got ", output_size[i], " at dim ", i); } Tensor output = at::empty(output_size, input.options()); Tensor output_ = output; Tensor weight_ = weight.contiguous(); Tensor bias_ = bias.defined() ? bias.contiguous() : bias; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_depthwise3d", [&]{ int64_t num_outputs = output_.numel(); int64_t block = 256; int64_t grid = ::min((num_outputs - 1) / block + 1, (int64_t)65536); int64_t smem = 0; const scalar_t* bias_ptr = bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL; // Range check to avoid overflow in CUDA kernels. TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(), "Input tensor is too large."); TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(), "Output tensor is too large."); TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(), "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(), "Padded input tensor is too large."); } DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1) DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1) DWCONV3D_FORWARD_DISPATCH_OTHERS } ); return output; } #undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION #undef DWCONV3D_FORWARD_DISPATCH_OTHERS #define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \ kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \ if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \ NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \ NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \ <scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)>) \ , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ grad_output_.packed_accessor32<scalar_t, 5>(), \ grad_input_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_input_kernel \ <scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1>) \ , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ grad_output_.packed_accessor32<scalar_t, 5>(), \ grad_input_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } #define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \ if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \ <scalar_t, accscalar_t, (dh), (dw)>) \ , dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ grad_output_.packed_accessor32<scalar_t, 5>(), \ input_.packed_accessor32<scalar_t, 5>(), \ grad_weight.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ 
C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ hipLaunchKernelGGL(( conv_depthwise3d_cuda_backward_weight_kernel \ <scalar_t, accscalar_t, -1, -1>) \ , dim3(grid), dim3(block), smem, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ grad_output_.packed_accessor32<scalar_t, 5>(), \ input_.packed_accessor32<scalar_t, 5>(), \ grad_weight.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const std::array<bool, 3> output_mask) { TORCH_CHECK(grad_output.device() == input.device() && input.device() == weight.device(), "expects input, weight and grad_output to be on the same device."); conv_depthwise_shape_check<3>( input, weight, Tensor() /* undefined */, grad_output, kernel_size, stride, padding, dilation); const Tensor grad_output_ = grad_output.contiguous(); Tensor grad_input_ = (output_mask[0] ? grad_input : Tensor()); if (output_mask[0]) { const Tensor weight_ = weight.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "conv_depthwise3d", [&] { int64_t num_inputs = grad_input_.numel(); int64_t block = 256; int64_t grid = ::min((num_inputs - 1) / block + 1, (int64_t)65536); // Range check to avoid overflow in CUDA kernels. TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(), "Input tensor is too large."); TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(), "Output tensor is too large."); TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(), "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(), "Padded input tensor is too large."); } DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, 1, 1, 1, 1, 1, 1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, 1, 1, 1, -1, -1, -1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, -1, -1, -1, 1, 1, 1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, -1, -1, -1, -1, -1, -1) DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS } ); } if (output_mask[1]) { const Tensor input_ = input.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "conv_depthwise3d", [&] { int64_t grid = grad_weight.numel(); int64_t block = 256; int64_t smem = sizeof(scalar_t) * block; const int64_t int_max = std::numeric_limits<int32_t>::max(); TORCH_CHECK(grad_input_.numel() <= int_max, "Input tensor is too large."); TORCH_CHECK(grad_output_.numel() <= int_max, "Output tensor is too large."); TORCH_CHECK(weight.numel() <= int_max, "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max, "Padded input tensor is too large."); } int64_t warp_size = at::cuda::warp_size(); TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / warp_size && grad_output_.size(3) <= int_max - warp_size && grad_output_.size(4) <= int_max - warp_size, "Output size is too large."); DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1) DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2) 
DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS } ); } if (output_mask[2]) { grad_bias = grad_output.sum({0, 2, 3, 4}); } return std::tie(grad_input, grad_weight, grad_bias); } std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } return _depthwise_3d_backward_cuda_out( grad_input, grad_weight, grad_bias, grad_output, input, weight, kernel_size, stride, padding, dilation, {true,true,true}); } std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const std::array<bool, 3> output_mask) { auto options = grad_output.options(); Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : Tensor()); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : Tensor()); Tensor grad_bias; /* undefined temporarily */ return _depthwise_3d_backward_cuda_out( grad_input, grad_weight, grad_bias, grad_output, input, weight, kernel_size, stride, padding, dilation, output_mask ); } REGISTER_CUDA_DISPATCH(conv_depthwise3d_backward_stub, &conv_depthwise3d_backward_cuda); #undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION #undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS #undef NODEF_OR_EQUAL_3 #undef NODEF_OR_EQUAL } }
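The depthwise 3-D convolution above maps each output element back to an input window through stride, padding, and dilation, and sizes its output with conv_output_size. Assuming the standard convolution arithmetic (the helper name convOutDim is mine, not part of the ATen sources), one spatial output dimension can be computed as in this sketch:

#include <cstdint>

// Standard per-dimension output-size formula for a dilated, strided, padded convolution.
static int64_t convOutDim(int64_t in, int64_t kernel, int64_t stride,
                          int64_t padding, int64_t dilation)
{
    const int64_t eff = dilation * (kernel - 1) + 1;   // effective kernel extent with dilation
    return (in + 2 * padding - eff) / stride + 1;
}

// e.g. in = 16, kernel = 3, stride = 2, padding = 1, dilation = 1  ->  (16 + 2 - 3) / 2 + 1 = 8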
b8f221ccd8a08639afdc8f29d1aaff150d97425b.cu
#include <ATen/ATen.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/native/ConvUtils.h> #include <algorithm> #include <tuple> #include <limits> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t, int kKnownKernelT, int kKnownKernelH, int kKnownKernelW, int kKnownDilationT, int kKnownDilationH, int kKnownDilationW> __global__ void conv_depthwise3d_cuda_kernel( const PackedTensorAccessor32<scalar_t, 5> input, PackedTensorAccessor32<scalar_t, 5> output, const PackedTensorAccessor32<scalar_t, 5> kernel, const scalar_t* bias, int strideT, int strideH, int strideW, int paddingT, int paddingH, int paddingW, int dilationT_, int dilationH_, int dilationW_) { const int kT = kKnownKernelT > 0 ? kKnownKernelT : kernel.size(2); const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3); const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4); const int oC = output.size(1); const int oT = output.size(2); const int oH = output.size(3); const int oW = output.size(4); const int iC = input.size(1); const int iT = input.size(2); const int iH = input.size(3); const int iW = input.size(4); const int channel_multiplier = oC / iC; const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_; const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_; const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_; const int num_output = output.size(0) * output.stride(0); CUDA_KERNEL_LOOP(index, num_output) { const int out_col = index % oW; const int out_row = (index / oW) % oH; const int out_frame = (index / oW / oH) % oT; const int out_channel = (index / oW / oH / oT) % oC; const int batch = index / oW / oH / oT / oC; const int in_channel = out_channel / channel_multiplier; const int in_col_start = out_col * strideW - paddingW; const int in_row_start = out_row * strideH - paddingH; const int in_frame_start = out_frame * strideT - paddingT; accscalar_t sum = 0; const scalar_t *kernel_ptr = kernel[out_channel].data(); const scalar_t *input_ptr = &input[batch][in_channel][in_frame_start][in_row_start][in_col_start]; for (int k_frame = 0; k_frame < kT; ++k_frame) { const int in_frame = in_frame_start + k_frame * dilationT; for (int k_row = 0; k_row < kH; ++k_row) { const int in_row = in_row_start + k_row * dilationH; for (int k_col = 0; k_col < kW; ++k_col) { const accscalar_t op1 = *(kernel_ptr++); const int in_col = in_col_start + k_col * dilationW; if (in_frame >= 0 && in_row >= 0 && in_col >= 0 && in_frame < iT && in_row < iH && in_col < iW) { sum += op1 * *(input_ptr); } input_ptr += dilationW; } input_ptr += iW * dilationH - kW * dilationW; } input_ptr += iW * (iH * dilationT - kH * dilationH); } if (bias != NULL) { sum += bias[out_channel]; } output[batch][out_channel][out_frame][out_row][out_col] = sum; } } template <typename scalar_t, typename accscalar_t, int kKnownKernelT, int kKnownKernelH, int kKnownKernelW, int kKnownDilationT, int kKnownDilationH, int kKnownDilationW, int kKnownStrideT, int kKnownStrideH, int kKnownStrideW> __global__ void conv_depthwise3d_cuda_backward_input_kernel( const PackedTensorAccessor32<scalar_t, 5> grad_output, PackedTensorAccessor32<scalar_t, 5> grad_input, const PackedTensorAccessor32<scalar_t, 5> kernel, int strideT_, int strideH_, int strideW_, int paddingT, int paddingH, int paddingW, int dilationT_, int dilationH_, int dilationW_) { const int kT = kKnownKernelT > 0 
? kKnownKernelT : kernel.size(2); const int kH = kKnownKernelH > 0 ? kKnownKernelH : kernel.size(3); const int kW = kKnownKernelW > 0 ? kKnownKernelW : kernel.size(4); const int oC = grad_output.size(1); const int oT = grad_output.size(2); const int oH = grad_output.size(3); const int oW = grad_output.size(4); const int iC = grad_input.size(1); const int iT = grad_input.size(2); const int iH = grad_input.size(3); const int iW = grad_input.size(4); const int channel_multiplier = oC / iC; const int dilationT = kKnownDilationT > 0 ? kKnownDilationT : dilationT_; const int dilationH = kKnownDilationH > 0 ? kKnownDilationH : dilationH_; const int dilationW = kKnownDilationW > 0 ? kKnownDilationW : dilationW_; const int strideT = kKnownStrideT > 0 ? kKnownStrideT : strideT_; const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_; const int strideW = kKnownStrideW > 0 ? kKnownStrideW : strideW_; const int num_input = grad_input.size(0) * grad_input.stride(0); CUDA_KERNEL_LOOP(index, num_input) { const int in_col = index % iW; const int in_row = (index / iW) % iH; const int in_frame = (index / iW / iH) % iT; const int in_channel = (index / iW / iH / iT) % iC; const int batch = index / iW / iH / iT / iC; const int out_col_end = in_col + paddingW; const int out_row_end = in_row + paddingH; const int out_frame_end = in_frame + paddingT; const scalar_t* kernel_ptr = kernel[in_channel * channel_multiplier].data(); accscalar_t sum = 0; for (int k_chn = in_channel * channel_multiplier; k_chn < (in_channel + 1) * channel_multiplier; ++k_chn) { const scalar_t* gout_ptr = grad_output[batch][k_chn].data(); for (int k_frame = 0; k_frame < kT; ++k_frame) { const int out_frame_raw = out_frame_end - k_frame * dilationT; const int out_frame = out_frame_raw / strideT; for (int k_row = 0; k_row < kH; ++k_row) { const int out_row_raw = out_row_end - k_row * dilationH; const int out_row = out_row_raw / strideH; for (int k_col = 0; k_col < kW; ++k_col) { const accscalar_t op1 = *(kernel_ptr++); const int out_col_raw = out_col_end - k_col * dilationW; const int out_col = out_col_raw / strideW; const int out_offs = (out_frame * oH + out_row) * oW + out_col; accscalar_t op2 = (accscalar_t)0; if (out_col >= 0 && out_row >= 0 && out_frame >= 0 && out_col < oW && out_row < oH && out_frame < oT) { op2 = *(gout_ptr + out_offs); } if (out_frame * strideT == out_frame_raw && out_row * strideH == out_row_raw && out_col * strideW == out_col_raw) { sum += op1 * op2; } } } } } grad_input[batch][in_channel][in_frame][in_row][in_col] = sum; } } template <typename scalar_t, typename accscalar_t, int kKnownStrideH, int kKnownStrideW> __global__ void conv_depthwise3d_cuda_backward_weight_kernel( const PackedTensorAccessor32<scalar_t, 5> grad_output, const PackedTensorAccessor32<scalar_t, 5> input, PackedTensorAccessor32<scalar_t, 5> grad_kernel, int strideT, int strideH_, int strideW_, int paddingT, int paddingH, int paddingW, int dilationT, int dilationH, int dilationW) { const int kC = grad_kernel.size(0); const int kT = grad_kernel.size(2); const int kH = grad_kernel.size(3); const int kW = grad_kernel.size(4); const int strideH = kKnownStrideH > 0 ? kKnownStrideH : strideH_; const int strideW = kKnownStrideW > 0 ? 
kKnownStrideW : strideW_; const int k_col = blockIdx.x % kW; const int k_row = (blockIdx.x / kW) % kH; const int k_frame = (blockIdx.x / kW / kH) % kT; const int k_channel = blockIdx.x / kW / kH / kT; scalar_t *result = &grad_kernel[k_channel][0][k_frame][k_row][k_col]; const int oT = grad_output.size(2); const int oH = grad_output.size(3); const int oW = grad_output.size(4); const int iT = input.size(2); const int iH = input.size(3); const int iW = input.size(4); const int channel_multiplier = grad_output.size(1) / input.size(1); const int in_channel = k_channel / channel_multiplier; extern __shared__ int sdata_raw[]; scalar_t* sdata = reinterpret_cast<scalar_t*>(sdata_raw); if (k_channel >= kC) { return; } const int laneid = threadIdx.x % C10_WARP_SIZE; const int warpid = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; accscalar_t grad = 0; int batch = warpid / oT; int gout_frame = warpid - batch * oT; for (int outer_pos = warpid; outer_pos < input.size(0) * oT; outer_pos += nwarps, gout_frame += nwarps) { while (gout_frame >= oT) { gout_frame -= oT; batch ++; } const int in_frame = (gout_frame * strideT) + (k_frame * dilationT) - paddingT; if (in_frame < 0 || in_frame >= iT) { continue; } const scalar_t* gout_ptr = grad_output[batch][k_channel][gout_frame].data() + laneid; const scalar_t* input_ptr = input[batch][in_channel][in_frame].data(); int gout_row = laneid / oW; int gout_col = laneid - gout_row * oW; for (; gout_row < oH; ) { const accscalar_t op1 = *(gout_ptr); gout_ptr += C10_WARP_SIZE; const int in_col = (gout_col * strideW) + (k_col * dilationW) - paddingW; const int in_row = (gout_row * strideH) + (k_row * dilationH) - paddingH; const int in_pos = in_row * iW + in_col; accscalar_t op2 = (accscalar_t)0; if (in_col >= 0 && in_col < iW && in_row >= 0 && in_row < iH) { op2 = *(input_ptr + in_pos); } gout_col += C10_WARP_SIZE; while (gout_col >= oW) { gout_col -= oW; gout_row ++; } grad += op1 * op2; } } sdata[threadIdx.x] = grad; __syncthreads(); CUDA_KERNEL_ASSERT(__popc(blockDim.x) == 1); #pragma unroll for (int i = blockDim.x / 2; i >= 1; i >>= 1) { if (threadIdx.x < i) { sdata[threadIdx.x] += sdata[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { *result = sdata[0]; } } template <int dim> void conv_depthwise_shape_check( const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& grad_output, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { TORCH_CHECK(kernel_size.size() == dim, "kernel size length should be ", dim, ", but got ", kernel_size.size()); TORCH_CHECK(stride.size() == dim, "stride length should be ", dim, ", but got ", stride.size()); TORCH_CHECK(padding.size() == dim, "padding length should be ", dim, ", but got ", padding.size()); TORCH_CHECK(dilation.size() == dim, "dilation length should be ", dim, ", but got ", dilation.size()); TORCH_CHECK(weight.defined(), "Weight must be defined."); TORCH_CHECK(input.dim() == dim + 1 || input.dim() == dim + 2, "Input dimension should be ", dim + 1, "D or ", dim + 2, "D, got ", input.dim(), "D"); TORCH_CHECK(weight.dim() == dim + 2, "Weight dimension should be ", dim + 2, "D, got ", weight.dim(), "D"); TORCH_CHECK(weight.size(1) == 1, "Depthwise weight should have in_channels=1, got ", weight.size(1)); TORCH_CHECK(weight.size(0) % input.size(-dim - 1) == 0, "Depthwise out channels should be a multiple of in channels, got ", weight.size(0), " and ", input.size(-dim - 1)); for (int i = 0; i < dim; ++i) { 
TORCH_CHECK(weight.size(i + 2) == kernel_size[i], "kernel size and weight size mismatch, got ", kernel_size, " and ", weight.sizes()); TORCH_CHECK(stride[i] >= 1, "stride should be at least 1, got ", stride); TORCH_CHECK(padding[i] >= 0, "padding should be non-negative, got ", padding); TORCH_CHECK(dilation[i] >= 1, "dilation should be at least 1, got ", dilation); } if (bias.defined()) { TORCH_CHECK(bias.dim() == 1, "Bias should be 1D tensor, got ", bias.dim(), "D"); TORCH_CHECK(bias.size(0) == weight.size(0), "Bias length should be equal to out_channels, got ", bias.size(0), " and ", weight.size(0)); } if (grad_output.defined()) { auto expected_output_size = conv_output_size(input.sizes(), weight.sizes(), padding, stride, dilation); TORCH_CHECK(grad_output.dim() == expected_output_size.size(), "Expect grad_output to be ", expected_output_size.size(), "D, got ", grad_output.dim(), "D."); for (int i = 0; i < grad_output.dim(); ++i) { TORCH_CHECK(grad_output.size(i) == expected_output_size[i], "Expect grad_output to be of same shape as output, got ", grad_output.size(i), " and ", expected_output_size[i], " at dimension ", i); } } } } #define NODEF_OR_EQUAL(x, y) ((y) < 0 || (x) == (y)) #define NODEF_OR_EQUAL_3(x, y1, y2, y3) \ (NODEF_OR_EQUAL(x[0], y1) && \ NODEF_OR_EQUAL(x[1], y2) && \ NODEF_OR_EQUAL(x[2], y3)) #define DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(kt, kh, kw, dilt, dilh, dilw) \ if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \ NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_kernel \ <scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw)> \ <<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \ input_.packed_accessor32<scalar_t, 5>(), \ output_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ bias_ptr, \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_FORWARD_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_kernel \ <scalar_t,accscalar_t, -1, -1, -1, -1, -1, -1> \ <<<grid, block, (smem), at::cuda::getCurrentCUDAStream()>>>( \ input_.packed_accessor32<scalar_t, 5>(), \ output_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ bias_ptr, \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } Tensor conv_depthwise3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; TORCH_CHECK(input.device() == weight.device(), "expects input and weight tensors to be on the same device."); if (bias.defined()) { TORCH_CHECK(input.device() == bias.device(), "expects input and bias tensors to be on the same device."); } conv_depthwise_shape_check<3>(input, weight, bias, Tensor() /* undefined */, kernel_size, stride, padding, dilation); Tensor input_ = input.contiguous(); if (input.dim() == 4 /* no batch */) { input_ = input.unsqueeze(0); } auto output_size = conv_output_size(input_.sizes(), weight.sizes(), padding, stride, dilation); for (size_t i = 0; i < 
output_size.size(); ++i) { TORCH_CHECK(output_size[i] > 0, "Output size should be positive, got ", output_size[i], " at dim ", i); } Tensor output = at::empty(output_size, input.options()); Tensor output_ = output; Tensor weight_ = weight.contiguous(); Tensor bias_ = bias.defined() ? bias.contiguous() : bias; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_depthwise3d", [&]{ int64_t num_outputs = output_.numel(); int64_t block = 256; int64_t grid = std::min((num_outputs - 1) / block + 1, (int64_t)65536); int64_t smem = 0; const scalar_t* bias_ptr = bias_.defined() ? bias_.data_ptr<scalar_t>() : NULL; // Range check to avoid overflow in CUDA kernels. TORCH_CHECK(input_.numel() <= std::numeric_limits<int32_t>::max(), "Input tensor is too large."); TORCH_CHECK(output_.numel() <= std::numeric_limits<int32_t>::max(), "Output tensor is too large."); TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(), "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(), "Padded input tensor is too large."); } DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(3, 3, 3, 1, 1, 1) DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION(-1, -1, -1, 1, 1, 1) DWCONV3D_FORWARD_DISPATCH_OTHERS } ); return output; } #undef DWCONV3D_FORWARD_DISPATCH_SPECIALIZATION #undef DWCONV3D_FORWARD_DISPATCH_OTHERS #define DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( \ kt, kh, kw, dilt, dilh, dilw, dt, dh, dw) \ if (NODEF_OR_EQUAL_3(kernel_size, (kt), (kh), (kw)) && \ NODEF_OR_EQUAL_3(dilation, (dilt), (dilh), (dilw)) && \ NODEF_OR_EQUAL_3(stride, (dt), (dh), (dw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_backward_input_kernel \ <scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)> \ <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \ grad_output_.packed_accessor32<scalar_t, 5>(), \ grad_input_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_backward_input_kernel \ <scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1> \ <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \ grad_output_.packed_accessor32<scalar_t, 5>(), \ grad_input_.packed_accessor32<scalar_t, 5>(), \ weight_.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } #define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(dh, dw) \ if (NODEF_OR_EQUAL_3(stride, -1, (dh), (dw))) { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_backward_weight_kernel \ <scalar_t, accscalar_t, (dh), (dw)> \ <<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \ grad_output_.packed_accessor32<scalar_t, 5>(), \ input_.packed_accessor32<scalar_t, 5>(), \ grad_weight.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else #define DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS \ { \ using accscalar_t = acc_type<scalar_t, true>; \ conv_depthwise3d_cuda_backward_weight_kernel \ <scalar_t, accscalar_t, -1, -1> \ <<<grid, 
block, smem, at::cuda::getCurrentCUDAStream()>>>( \ grad_output_.packed_accessor32<scalar_t, 5>(), \ input_.packed_accessor32<scalar_t, 5>(), \ grad_weight.packed_accessor32<scalar_t, 5>(), \ stride[0], stride[1], stride[2], \ padding[0], padding[1], padding[2], \ dilation[0], dilation[1], dilation[2]); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } std::tuple<Tensor&, Tensor&, Tensor&> _depthwise_3d_backward_cuda_out( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const std::array<bool, 3> output_mask) { TORCH_CHECK(grad_output.device() == input.device() && input.device() == weight.device(), "expects input, weight and grad_output to be on the same device."); conv_depthwise_shape_check<3>( input, weight, Tensor() /* undefined */, grad_output, kernel_size, stride, padding, dilation); const Tensor grad_output_ = grad_output.contiguous(); Tensor grad_input_ = (output_mask[0] ? grad_input : Tensor()); if (output_mask[0]) { const Tensor weight_ = weight.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "conv_depthwise3d", [&] { int64_t num_inputs = grad_input_.numel(); int64_t block = 256; int64_t grid = std::min((num_inputs - 1) / block + 1, (int64_t)65536); // Range check to avoid overflow in CUDA kernels. TORCH_CHECK(grad_input_.numel() <= std::numeric_limits<int32_t>::max(), "Input tensor is too large."); TORCH_CHECK(grad_output_.numel() <= std::numeric_limits<int32_t>::max(), "Output tensor is too large."); TORCH_CHECK(weight_.numel() <= std::numeric_limits<int32_t>::max(), "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= std::numeric_limits<int32_t>::max(), "Padded input tensor is too large."); } DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, 1, 1, 1, 1, 1, 1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, 1, 1, 1, -1, -1, -1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, -1, -1, -1, 1, 1, 1) DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION( 3, 3, 3, -1, -1, -1, -1, -1, -1) DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS } ); } if (output_mask[1]) { const Tensor input_ = input.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "conv_depthwise3d", [&] { int64_t grid = grad_weight.numel(); int64_t block = 256; int64_t smem = sizeof(scalar_t) * block; const int64_t int_max = std::numeric_limits<int32_t>::max(); TORCH_CHECK(grad_input_.numel() <= int_max, "Input tensor is too large."); TORCH_CHECK(grad_output_.numel() <= int_max, "Output tensor is too large."); TORCH_CHECK(weight.numel() <= int_max, "Weight tensor is too large."); for (int i = 0; i < 3; ++i) { TORCH_CHECK(padding[i] * 2 + input.size(i + 2) <= int_max, "Padded input tensor is too large."); } int64_t warp_size = at::cuda::warp_size(); TORCH_CHECK(grad_output_.size(0) * grad_output_.size(2) < int_max - block / warp_size && grad_output_.size(3) <= int_max - warp_size && grad_output_.size(4) <= int_max - warp_size, "Output size is too large."); DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(1, 1) DWCONV3D_BACKWARD_WEIGHT_DISPATCH_SPECIALIZATION(2, 2) DWCONV3D_BACKWARD_WEIGHT_DISPATCH_OTHERS } ); } if (output_mask[2]) { grad_bias = grad_output.sum({0, 2, 3, 4}); } return std::tie(grad_input, grad_weight, grad_bias); } std::tuple<Tensor&, Tensor&, Tensor&> conv_depthwise3d_backward_cuda_out(const Tensor& grad_output, const Tensor& 
input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } return _depthwise_3d_backward_cuda_out( grad_input, grad_weight, grad_bias, grad_output, input, weight, kernel_size, stride, padding, dilation, {true,true,true}); } std::tuple<Tensor, Tensor, Tensor> conv_depthwise3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const std::array<bool, 3> output_mask) { auto options = grad_output.options(); Tensor grad_input = (output_mask[0] ? at::empty(input.sizes(), options) : Tensor()); Tensor grad_weight = (output_mask[1] ? at::empty(weight.sizes(), options) : Tensor()); Tensor grad_bias; /* undefined temporarily */ return _depthwise_3d_backward_cuda_out( grad_input, grad_weight, grad_bias, grad_output, input, weight, kernel_size, stride, padding, dilation, output_mask ); } REGISTER_CUDA_DISPATCH(conv_depthwise3d_backward_stub, &conv_depthwise3d_backward_cuda); #undef DWCONV3D_BACKWARD_INPUT_DISPATCH_SPECIALIZATION #undef DWCONV3D_BACKWARD_INPUT_DISPATCH_OTHERS #undef NODEF_OR_EQUAL_3 #undef NODEF_OR_EQUAL } }
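The dispatch macros in the depthwise-convolution file above select a fully specialized kernel instantiation when the runtime kernel size, dilation, and stride match a common case (for example 3x3x3 with dilation 1), and otherwise fall back to an instantiation whose template parameters are -1, meaning the values are taken from the kernel arguments at run time. A minimal sketch of that pattern follows; the names demo_kernel, launch_demo, and KSIZE are illustrative and are not part of the file above.

// Compile-time kernel size when KSIZE >= 0; runtime value otherwise.
template <int KSIZE>
__global__ void demo_kernel(const float* in, float* out, int n, int runtime_ksize) {
  // With a non-negative KSIZE the loop bound is a compile-time constant and can be unrolled.
  const int ksize = (KSIZE >= 0) ? KSIZE : runtime_ksize;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float acc = 0.f;
  for (int k = 0; k < ksize; ++k) {
    acc += in[i] * k;  // placeholder arithmetic; a real kernel would convolve here
  }
  out[i] = acc;
}

// Host-side dispatch mirroring the NODEF_OR_EQUAL idea: hot case specialized, generic fallback otherwise.
void launch_demo(const float* in, float* out, int n, int ksize, cudaStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  if (ksize == 3) {
    demo_kernel<3><<<grid, block, 0, stream>>>(in, out, n, ksize);
  } else {
    demo_kernel<-1><<<grid, block, 0, stream>>>(in, out, n, ksize);
  }
}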
14410855f4d187be71fdad396cd8ba2cec9fa8f8.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <algorithm> #include <cassert> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <omp.h> #include "CudaEvent.h" #include "CudaStream.h" /////////////////////////////////////////////////////////////////////////////// // CUDA kernel that adds the vectors a[N] += b[N] /////////////////////////////////////////////////////////////////////////////// __global__ void sum(double* a, double* b, size_t N) { size_t tid = threadIdx.x + blockDim.x*blockIdx.x; size_t grid_step = blockDim.x * gridDim.x; while(tid<N) { for(int i=0; i<50; ++i) a[tid] += exp(1./a[tid])+b[tid]; tid += grid_step; } } /////////////////////////////////////////////////////////////////////////////// // helper function that will initialize cuda. /////////////////////////////////////////////////////////////////////////////// bool initialize_cuda() { hipError_t result = hipInit(0); return result == hipSuccess; } /////////////////////////////////////////////////////////////////////////////// // helper function for allocating device memory /////////////////////////////////////////////////////////////////////////////// template <typename T> T* allocate_on_device(size_t N) { void* ptr=0; hipMalloc(&ptr, N*sizeof(T)); return reinterpret_cast<T*>(ptr); } /////////////////////////////////////////////////////////////////////////////// // helper function for allocating host memory // takes boolean flag indicating whether to use pinned memory or not /////////////////////////////////////////////////////////////////////////////// template <typename T> T* allocate_on_host(size_t N, T value=T(), bool pinned=false) { T* ptr=0; if( pinned ) { //hipHostMalloc((void**)&ptr, N*sizeof(T), hipHostMallocPortable); std::cout << "allocating " << N*sizeof(T) << " bytes pinned host data" << std::endl; hipHostMalloc((void**)&ptr, N*sizeof(T)); } else { std::cout << "allocating " << N*sizeof(T) << " bytes unpinned host data" << std::endl; ptr = reinterpret_cast<T*>(malloc(N*sizeof(T))); } std::fill(ptr, ptr+N, value); return ptr; } // wrapper for launch configuration class Launch { public: Launch(size_t N, unsigned int b_dim) : block_(b_dim) { unsigned int g_dim = N/b_dim + (N%b_dim ? 1 : 0); g_dim = g_dim > 1024 ? 
1024 : g_dim; grid_ = dim3(g_dim); } dim3 block() { return block_; } dim3 grid() { return grid_; } unsigned int block_dim() { return block_.x; } unsigned int grid_dim() { return grid_.x; } private: Launch(); dim3 block_; dim3 grid_; }; /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// int main(void) { const size_t N=128*1024*1024; const size_t nchunks=1; const size_t chunk_dim=N/nchunks; const size_t size = sizeof(double)*N; const size_t chunk_size = size/nchunks; // initialize CUDA if(!initialize_cuda()) { std::cerr << "unable to initialize CUDA" << std::endl; return 1; } // create streams CudaStream stream_H2D(true); CudaStream stream_D2H(true); CudaStream stream_compute(true); // check that streams are/are not default stream as appropriate assert(!stream_H2D.is_default_stream()); assert(!stream_D2H.is_default_stream()); assert(!stream_compute.is_default_stream()); Launch launch(N, 128); std::cout << "launch grid : " << launch.block_dim() << "*" << launch.grid_dim() << std::endl; // allocate host and device memory double *a_d = allocate_on_device<double>(N); double *b_d = allocate_on_device<double>(N); double *a_h = allocate_on_host<double>(N, 1., true); double *b_h = allocate_on_host<double>(N, 1., true); CudaEvent event_start = stream_H2D.insert_event(); // copy data to device for(int i=0; i<nchunks; ++i) { size_t offset = i*chunk_dim; hipMemcpyAsync(a_d+offset, a_h+offset, chunk_size, hipMemcpyHostToDevice, stream_H2D.stream()); hipMemcpyAsync(b_d+offset, b_h+offset, chunk_size, hipMemcpyHostToDevice, stream_H2D.stream()); // insert events that force compute stream to wait CudaEvent event_H2D = stream_H2D.insert_event(); stream_compute.wait_on_event(event_H2D); // asynchronously execute the kernel hipLaunchKernelGGL(( sum), dim3(launch.block()), dim3(launch.grid()), 0, stream_compute.stream(), a_d+offset, b_d+offset, chunk_dim); // insert event CudaEvent event_compute = stream_compute.insert_event(); stream_D2H.wait_on_event(event_compute); hipMemcpyAsync(a_h+offset, a_d+offset, chunk_size, hipMemcpyDeviceToHost, stream_D2H.stream()); CudaEvent event_D2H = stream_D2H.insert_event(); } CudaEvent event_end = stream_D2H.insert_event(); event_end.wait(); double time_taken = event_end.time_since(event_start); std::cout << "that took " << time_taken << " seconds" << std::endl; /* size_t limit = 256; limit = N>limit ? limit : N; double result = 0.; for(size_t i=N-limit; i<N; ++i) std::cout << a_h[i] << ((i+1)%launch.block_dim() ? " " : " | "); #pragma omp parallel for reduction(+:result) for(size_t i=0; i<N; ++i) result += 2. - a_h[i]; std::cout << std::endl; std::cout << "result : " << result << std::endl; */ return 0; }
14410855f4d187be71fdad396cd8ba2cec9fa8f8.cu
#include <iostream> #include <algorithm> #include <cassert> #include <cuda.h> #include <cuda_runtime.h> #include <omp.h> #include "CudaEvent.h" #include "CudaStream.h" /////////////////////////////////////////////////////////////////////////////// // CUDA kernel that adds the vectors a[N] += b[N] /////////////////////////////////////////////////////////////////////////////// __global__ void sum(double* a, double* b, size_t N) { size_t tid = threadIdx.x + blockDim.x*blockIdx.x; size_t grid_step = blockDim.x * gridDim.x; while(tid<N) { for(int i=0; i<50; ++i) a[tid] += exp(1./a[tid])+b[tid]; tid += grid_step; } } /////////////////////////////////////////////////////////////////////////////// // helper function that will initialize cuda. /////////////////////////////////////////////////////////////////////////////// bool initialize_cuda() { CUresult result = cuInit(0); return result == CUDA_SUCCESS; } /////////////////////////////////////////////////////////////////////////////// // helper function for allocating device memory /////////////////////////////////////////////////////////////////////////////// template <typename T> T* allocate_on_device(size_t N) { void* ptr=0; cudaMalloc(&ptr, N*sizeof(T)); return reinterpret_cast<T*>(ptr); } /////////////////////////////////////////////////////////////////////////////// // helper function for allocating host memory // takes boolean flag indicating whether to use pinned memory or not /////////////////////////////////////////////////////////////////////////////// template <typename T> T* allocate_on_host(size_t N, T value=T(), bool pinned=false) { T* ptr=0; if( pinned ) { //cudaHostAlloc((void**)&ptr, N*sizeof(T), cudaHostAllocPortable); std::cout << "allocating " << N*sizeof(T) << " bytes pinned host data" << std::endl; cudaMallocHost((void**)&ptr, N*sizeof(T)); } else { std::cout << "allocating " << N*sizeof(T) << " bytes unpinned host data" << std::endl; ptr = reinterpret_cast<T*>(malloc(N*sizeof(T))); } std::fill(ptr, ptr+N, value); return ptr; } // wrapper for launch configuration class Launch { public: Launch(size_t N, unsigned int b_dim) : block_(b_dim) { unsigned int g_dim = N/b_dim + (N%b_dim ? 1 : 0); g_dim = g_dim > 1024 ? 
1024 : g_dim; grid_ = dim3(g_dim); } dim3 block() { return block_; } dim3 grid() { return grid_; } unsigned int block_dim() { return block_.x; } unsigned int grid_dim() { return grid_.x; } private: Launch(); dim3 block_; dim3 grid_; }; /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// int main(void) { const size_t N=128*1024*1024; const size_t nchunks=1; const size_t chunk_dim=N/nchunks; const size_t size = sizeof(double)*N; const size_t chunk_size = size/nchunks; // initialize CUDA if(!initialize_cuda()) { std::cerr << "unable to initialize CUDA" << std::endl; return 1; } // create streams CudaStream stream_H2D(true); CudaStream stream_D2H(true); CudaStream stream_compute(true); // check that streams are/are not default stream as appropriate assert(!stream_H2D.is_default_stream()); assert(!stream_D2H.is_default_stream()); assert(!stream_compute.is_default_stream()); Launch launch(N, 128); std::cout << "launch grid : " << launch.block_dim() << "*" << launch.grid_dim() << std::endl; // allocate host and device memory double *a_d = allocate_on_device<double>(N); double *b_d = allocate_on_device<double>(N); double *a_h = allocate_on_host<double>(N, 1., true); double *b_h = allocate_on_host<double>(N, 1., true); CudaEvent event_start = stream_H2D.insert_event(); // copy data to device for(int i=0; i<nchunks; ++i) { size_t offset = i*chunk_dim; cudaMemcpyAsync(a_d+offset, a_h+offset, chunk_size, cudaMemcpyHostToDevice, stream_H2D.stream()); cudaMemcpyAsync(b_d+offset, b_h+offset, chunk_size, cudaMemcpyHostToDevice, stream_H2D.stream()); // insert events that force compute stream to wait CudaEvent event_H2D = stream_H2D.insert_event(); stream_compute.wait_on_event(event_H2D); // asynchronously execute the kernel sum<<<launch.block(), launch.grid(), 0, stream_compute.stream()>>>(a_d+offset, b_d+offset, chunk_dim); // insert event CudaEvent event_compute = stream_compute.insert_event(); stream_D2H.wait_on_event(event_compute); cudaMemcpyAsync(a_h+offset, a_d+offset, chunk_size, cudaMemcpyDeviceToHost, stream_D2H.stream()); CudaEvent event_D2H = stream_D2H.insert_event(); } CudaEvent event_end = stream_D2H.insert_event(); event_end.wait(); double time_taken = event_end.time_since(event_start); std::cout << "that took " << time_taken << " seconds" << std::endl; /* size_t limit = 256; limit = N>limit ? limit : N; double result = 0.; for(size_t i=N-limit; i<N; ++i) std::cout << a_h[i] << ((i+1)%launch.block_dim() ? " " : " | "); #pragma omp parallel for reduction(+:result) for(size_t i=0; i<N; ++i) result += 2. - a_h[i]; std::cout << std::endl; std::cout << "result : " << result << std::endl; */ return 0; }
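The pair above shows the main rewrites hipify applies to streaming code: runtime calls are renamed one-for-one (cudaMemcpyAsync becomes hipMemcpyAsync, cudaMallocHost becomes hipHostMalloc), while the triple-chevron launch is rewritten into hipLaunchKernelGGL, whose leading arguments are the kernel, grid, block, dynamic shared-memory bytes, and stream, followed by the kernel arguments. A minimal sketch of the two launch forms, using a trivial kernel named axpy that is illustrative and not taken from the files above:

__global__ void axpy(double a, const double* x, double* y, size_t n) {
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}

void launch(double a, const double* x, double* y, size_t n, cudaStream_t s) {
  dim3 block(128);
  dim3 grid((unsigned)((n + 127) / 128));
  // CUDA launch syntax, as in the .cu file above:
  axpy<<<grid, block, 0, s>>>(a, x, y, n);
  // hipify expresses the same launch as (shown for comparison only):
  //   hipLaunchKernelGGL(axpy, grid, block, 0, s, a, x, y, n);
}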
9be7ad7c1ead7695bbb3be8f2ec6c982b790ac56.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/matrix/kernelparams.h> #include <matrix/grammatrix.cuh> #include <matrix/kernelfactory.cuh> #include <memory> #include <random/rng.cuh> #include <sstream> #include <string> #include <vector> #include "../common/ml_benchmark.hpp" namespace MLCommon { namespace Bench { namespace Matrix { using namespace MLCommon::Matrix; struct GramTestParams { int m; // m parameter of the GEMM int k; // k parameter of the GEMM int n; // n parameter of the GEMM KernelParams kernel_params; }; // struct GramTestParams template <typename T> struct GramMatrix : public Fixture { GramMatrix(const std::string& name, const GramTestParams& p) : Fixture(name, std::shared_ptr<deviceAllocator>( new raft::mr::device::default_allocator)), params(p) { std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"}; std::ostringstream oss; oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x" << p.n; this->SetName(oss.str().c_str()); CUBLAS_CHECK(hipblasCreate(&cublas_handle)); kernel = std::unique_ptr<GramMatrixBase<T>>( KernelFactory<T>::create(p.kernel_params, cublas_handle)); } ~GramMatrix() { CUBLAS_CHECK(hipblasDestroy(cublas_handle)); } protected: void allocateBuffers(const ::benchmark::State& state) override { alloc(A, params.m * params.k); alloc(B, params.k * params.n); alloc(C, params.m * params.n); raft::random::Rng r(123456ULL); r.uniform(A, params.m * params.k, T(-1.0), T(1.0), stream); r.uniform(B, params.k * params.n, T(-1.0), T(1.0), stream); } void deallocateBuffers(const ::benchmark::State& state) override { dealloc(A, params.m * params.k); dealloc(B, params.k * params.n); dealloc(C, params.m * params.n); } void runBenchmark(::benchmark::State& state) override { if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); } loopOnState(state, [this]() { (*this->kernel)(this->A, this->params.m, this->params.k, this->B, this->params.n, this->C, this->stream); }); } private: hipblasHandle_t cublas_handle; std::unique_ptr<GramMatrixBase<T>> kernel; GramTestParams params; T* A; // input matrix A, size [m * k] T* B; // input matrix B, size [n * k] T* C; // output matrix C, size [m*n] }; static std::vector<GramTestParams> getInputs() { std::vector<GramTestParams> param_vec; std::vector<KernelParams> kernel_params{ KernelParams{LINEAR, 3, 1, 0}, KernelParams{POLYNOMIAL, 2, 1.3, 1}, KernelParams{TANH, 2, 0.5, 2.4}, KernelParams{RBF, 2, 0.5, 0}}; struct TestSize { int m; int k; int n; }; std::vector<TestSize> data_size{{4096, 10, 1024}, {4096, 100, 1024}, {4096, 1000, 1024}, {4096, 10000, 1024}, {100000, 10, 1024}, {100000, 100, 1024}, {100000, 1000, 1024}}; param_vec.reserve(kernel_params.size() * data_size.size()); for (TestSize s : data_size) { for (auto kernel : kernel_params) { param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel}); } } return param_vec; } 
ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs()); ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs()); } // namespace Matrix } // namespace Bench } // namespace MLCommon
9be7ad7c1ead7695bbb3be8f2ec6c982b790ac56.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/matrix/kernelparams.h> #include <matrix/grammatrix.cuh> #include <matrix/kernelfactory.cuh> #include <memory> #include <random/rng.cuh> #include <sstream> #include <string> #include <vector> #include "../common/ml_benchmark.hpp" namespace MLCommon { namespace Bench { namespace Matrix { using namespace MLCommon::Matrix; struct GramTestParams { int m; // m parameter of the GEMM int k; // k parameter of the GEMM int n; // n parameter of the GEMM KernelParams kernel_params; }; // struct GramTestParams template <typename T> struct GramMatrix : public Fixture { GramMatrix(const std::string& name, const GramTestParams& p) : Fixture(name, std::shared_ptr<deviceAllocator>( new raft::mr::device::default_allocator)), params(p) { std::vector<std::string> kernel_names{"linear", "poly", "rbf", "tanh"}; std::ostringstream oss; oss << name << "/" << kernel_names[p.kernel_params.kernel] << "/" << p.m << "x" << p.k << "x" << p.n; this->SetName(oss.str().c_str()); CUBLAS_CHECK(cublasCreate(&cublas_handle)); kernel = std::unique_ptr<GramMatrixBase<T>>( KernelFactory<T>::create(p.kernel_params, cublas_handle)); } ~GramMatrix() { CUBLAS_CHECK(cublasDestroy(cublas_handle)); } protected: void allocateBuffers(const ::benchmark::State& state) override { alloc(A, params.m * params.k); alloc(B, params.k * params.n); alloc(C, params.m * params.n); raft::random::Rng r(123456ULL); r.uniform(A, params.m * params.k, T(-1.0), T(1.0), stream); r.uniform(B, params.k * params.n, T(-1.0), T(1.0), stream); } void deallocateBuffers(const ::benchmark::State& state) override { dealloc(A, params.m * params.k); dealloc(B, params.k * params.n); dealloc(C, params.m * params.n); } void runBenchmark(::benchmark::State& state) override { if (!this->kernel) { state.SkipWithError("Kernel matrix is not initialized"); } loopOnState(state, [this]() { (*this->kernel)(this->A, this->params.m, this->params.k, this->B, this->params.n, this->C, this->stream); }); } private: cublasHandle_t cublas_handle; std::unique_ptr<GramMatrixBase<T>> kernel; GramTestParams params; T* A; // input matrix A, size [m * k] T* B; // input matrix B, size [n * k] T* C; // output matrix C, size [m*n] }; static std::vector<GramTestParams> getInputs() { std::vector<GramTestParams> param_vec; std::vector<KernelParams> kernel_params{ KernelParams{LINEAR, 3, 1, 0}, KernelParams{POLYNOMIAL, 2, 1.3, 1}, KernelParams{TANH, 2, 0.5, 2.4}, KernelParams{RBF, 2, 0.5, 0}}; struct TestSize { int m; int k; int n; }; std::vector<TestSize> data_size{{4096, 10, 1024}, {4096, 100, 1024}, {4096, 1000, 1024}, {4096, 10000, 1024}, {100000, 10, 1024}, {100000, 100, 1024}, {100000, 1000, 1024}}; param_vec.reserve(kernel_params.size() * data_size.size()); for (TestSize s : data_size) { for (auto kernel : kernel_params) { param_vec.push_back(GramTestParams{s.m, s.k, s.n, kernel}); } } return param_vec; } ML_BENCH_REGISTER(GramTestParams, GramMatrix<float>, "", getInputs()); 
ML_BENCH_REGISTER(GramTestParams, GramMatrix<double>, "", getInputs()); } // namespace Matrix } // namespace Bench } // namespace MLCommon
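In this GramMatrix benchmark pair the only textual changes are the BLAS handle type and entry points: cublasHandle_t, cublasCreate, and cublasDestroy become hipblasHandle_t, hipblasCreate, and hipblasDestroy, while the surrounding CUBLAS_CHECK macro and the rest of the benchmark are untouched. A minimal sketch of that handle lifecycle on the CUDA side, with a simple SAXPY call; the bare error check here is illustrative and is not the project's CUBLAS_CHECK macro.

#include <cublas_v2.h>
#include <cstdio>

// y = a*x + y on device pointers d_x, d_y of length n.
void saxpy_with_handle(int n, float a, const float* d_x, float* d_y) {
  cublasHandle_t handle;
  if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) {
    std::fprintf(stderr, "cublasCreate failed\n");
    return;
  }
  cublasSaxpy(handle, n, &a, d_x, 1, d_y, 1);  // hipBLAS analogue: hipblasSaxpy
  cublasDestroy(handle);                       // hipBLAS analogue: hipblasDestroy
}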
6b8dbea4d7bcbdc36fd2703f7936afa7ccfaad1a.hip
// !!! This is a file automatically generated by hipify!!! #include <cv.h> #include <highgui.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math_functions.h> #include "opencv2/gpu/gpu.hpp" #define MASK_SIZE 9 #define MASK_WIDTH 3 #define TILE_WIDTH 32 using namespace cv; // Convolution matrix on constant memory __constant__ char d_M[MASK_SIZE]; // Parallel Code on GPU using Constant Mem for matrix convol (CUDA) __global__ void imgConvGPU(unsigned char* imgIn, int row, int col, unsigned char* imgOut) { unsigned int row_d = blockIdx.y*blockDim.y+threadIdx.y; unsigned int col_d = blockIdx.x*blockDim.x+threadIdx.x; int start_r = row_d - (MASK_WIDTH/2); int start_c = col_d - (MASK_WIDTH/2); int Pixel = 0; for (int k = 0; k < MASK_WIDTH; ++k) { for (int l = 0; l < MASK_WIDTH; ++l) { if((k + start_r) >= 0 && (k + start_r) < row && (l + start_c) >= 0 && (l + start_c) < col) Pixel += imgIn[(k + start_r) * col + (l + start_c)] * d_M[k * MASK_WIDTH + l]; } } Pixel = Pixel < 0 ? 0 : Pixel > 255 ? 255 : Pixel; if (row_d < row && col_d < col) imgOut[row_d * col + col_d] = (unsigned char)Pixel; } // Parallel Code on GPU using shared Mem (CUDA) __global__ void imgConvGPU_sharedMem(unsigned char* imgIn, int row, int col, unsigned char* imgOut) { int dest, destX, destY, src, srcX, srcY; const int size_T = TILE_WIDTH + TILE_WIDTH - 1; unsigned int row_d = blockIdx.y*blockDim.y+threadIdx.y; unsigned int col_d = blockIdx.x*blockDim.x+threadIdx.x; __shared__ char d_T[size_T][size_T]; int n = MASK_WIDTH/2; dest = threadIdx.y * TILE_WIDTH + threadIdx.x; destY = dest / size_T; destX = dest % size_T; srcY = blockIdx.y * TILE_WIDTH + destY - n; srcX = blockIdx.x * TILE_WIDTH + destX - n; src = srcY * col + srcX; if (srcY >= 0 && srcY < row && srcX >= 0 && srcX < col) d_T[destY][destX] = imgIn[src]; else d_T[destY][destX] = 0; dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH; destY = dest / size_T; destX = dest % size_T; srcY = blockIdx.y * TILE_WIDTH + destY - n; srcX = blockIdx.x * TILE_WIDTH + destX - n; src = srcY * col + srcX; if (destY < size_T) if (srcY >= 0 && srcY < row && srcX >= 0 && srcX < col) d_T[destY][destX] = imgIn[src]; else d_T[destY][destX] = 0; __syncthreads(); int Pixel = 0; for (int k = 0; k < MASK_WIDTH; ++k) { for (int l = 0; l < MASK_WIDTH; ++l) { Pixel += d_T[threadIdx.y + k][threadIdx.x + l] * d_M[k * MASK_WIDTH + l]; } } Pixel = Pixel < 0 ? 0 : Pixel > 255 ? 
255 : Pixel; if (row_d < row && col_d < col) imgOut[row_d * col + col_d] = (unsigned char)Pixel; } void checkError(hipError_t error, std::string type) { if(error != hipSuccess){ printf("Error in %s, %s\n", type.c_str(), hipGetErrorString(error)); exit(0); } } void cuda_const(unsigned char* imgIn, int row, int col, unsigned int maskWidth, unsigned char* imgOut, char* M, int size, double& time) { int size_M = sizeof(char)*MASK_SIZE; hipError_t error = hipSuccess; unsigned char *d_dataRawImage, *d_imageOutput; error = hipMalloc((void**)&d_dataRawImage,size); checkError(error, "hipMalloc for d_dataRawImage (w/ const mem)"); error = hipMalloc((void**)&d_imageOutput,size); checkError(error, "hipMalloc for d_imageOutput (w/ const mem)"); /*******************************GPU********************************/ clock_t tic = clock(); error = hipMemcpy(d_dataRawImage,imgIn,size,hipMemcpyHostToDevice); checkError(error, "hipMemcpy for d_dataRawImage (w/ const mem)"); error = hipMemcpyToSymbol(d_M, M, size_M); checkError(error, "hipMemcpyToSymbol for d_M (w/ const mem)"); dim3 dimBlock(32,32,1); dim3 dimGrid(ceil(col/float(dimBlock.x)),ceil(row/float(dimBlock.y)),1); hipLaunchKernelGGL(( imgConvGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_dataRawImage, row, col, d_imageOutput); hipDeviceSynchronize(); error = hipMemcpy(imgOut,d_imageOutput,size,hipMemcpyDeviceToHost); checkError(error, "hipMemcpy for imgOut (w/ const mem)"); clock_t toc = clock(); time = (double)(toc - tic) / CLOCKS_PER_SEC; /*****************************GPU END******************************/ hipFree(d_dataRawImage); hipFree(d_imageOutput); } void cuda_sm(unsigned char* imgIn, int row, int col, unsigned int maskWidth, unsigned char* imgOut, char* M, int size, double& time) { int size_M = sizeof(char)*MASK_SIZE; hipError_t error = hipSuccess; unsigned char *d_dataRawImage, *d_imageOutput; error = hipMalloc((void**)&d_dataRawImage,size); checkError(error, "hipMalloc for d_dataRawImage (w/ const and shared mem))"); error = hipMalloc((void**)&d_imageOutput,size); checkError(error, "hipMalloc for d_imageOutput (w/ const and shared mem)"); /*******************************GPU********************************/ clock_t tic = clock(); error = hipMemcpy(d_dataRawImage,imgIn,size,hipMemcpyHostToDevice); checkError(error, "hipMemcpy for d_dataRawImage (w/ const and shared mem)"); error = hipMemcpyToSymbol(d_M, M, size_M); checkError(error, "hipMemcpyToSymbol for d_M (w/ const and shared mem)"); dim3 dimBlock(TILE_WIDTH,TILE_WIDTH); dim3 dimGrid(ceil(col/float(dimBlock.x)),ceil(row/float(dimBlock.y))); hipLaunchKernelGGL(( imgConvGPU_sharedMem), dim3(dimGrid),dim3(dimBlock), 0, 0, d_dataRawImage, row, col, d_imageOutput); hipDeviceSynchronize(); error = hipMemcpy(imgOut,d_imageOutput,size,hipMemcpyDeviceToHost); checkError(error, "hipMemcpy for imgOut (w/ const and shared mem)"); clock_t toc = clock(); time = (double)(toc - tic) / CLOCKS_PER_SEC; /*****************************GPU END******************************/ hipFree(d_dataRawImage); hipFree(d_imageOutput); } int main(int argc, char** argv) { char M[] = {-1,0,1,-2,0,2,-1,0,1}; unsigned int maskWidth = 3; /* imgIn: Original img (Gray scaled) imgOut_1: Parallel w/ constant mem imgOut_2: Parallel w/ constant and shared mem imgOut_3: imgOut_4: */ unsigned char *imgIn, *imgOut_1, *imgOut_2, *imgOut_3; double GPU_C, GPU_CS, GPU, GPU_CV, acc1, acc2, acc3; GPU_C = GPU_CS = GPU = GPU_CV = acc1 = acc2 = acc3 = 0.0; // Meaning of positions: {GPU_C, GPU_CS, GPU, GPU_CV} bool op[] = {false, false, false, false}; 
if(argc < 2) { printf("No image name given\n"); return -1; } char* imageName = argv[1]; for (int i = 2; i < argc; i++) { std::string s = argv[i]; if (s == "cconst") op[0] = true; else if (s == "csha") op[1] = true; else if (s == "pd") op[2] = true; else if (s == "sobel_d") op[3] = true; } Mat image; image = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE); // Get image dimension Size s = image.size(); int col = s.width; int row = s.height; int size = sizeof(unsigned char)*row*col; int sizeGray = sizeof(unsigned char)*row*col; imgIn = (unsigned char*)malloc(size); imgOut_1 = (unsigned char*)malloc(sizeGray); imgOut_2 = (unsigned char*)malloc(sizeGray); imgOut_3 = (unsigned char*)malloc(sizeGray); imgIn = image.data; Mat result, imgOut_4; imgOut_4.create(row,col,CV_8UC1); if (op[0]) cuda_const(imgIn, row, col, maskWidth, imgOut_1, M, sizeGray, GPU_C); if (op[1]) cuda_sm(imgIn, row, col, maskWidth, imgOut_2, M, sizeGray, GPU_CS); // if (op[2]) parallel_device(imgIn, row, col, maskWidth, imgOut_3, M, sizeGray, GPU); // if (op[3]) sobel_device(image, imgOut_4, GPU_CV); result.create(row,col,CV_8UC1); if (op[0]) { printf(" %f |", GPU_C); result.data = imgOut_1; imwrite("res_GPU_C.jpg", result); } else printf(" - |"); if (op[1]) { if (op[0]) { acc1 = GPU_C / GPU_CS; printf(" %f | %f |", GPU_CS, acc1); } else printf(" %f | - |", GPU_CS); result.data = imgOut_2; imwrite("res_GPU_CS.jpg", result); } else printf(" - | - |"); if (op[2]) { if (op[0]) { acc2 = GPU_C / GPU; printf(" %f | %f |", GPU, acc2); } else printf(" %f | - |", GPU); result.data = imgOut_3; imwrite("res_GPU.jpg", result); } else printf(" - | - |"); if (op[3]) { if (op[0]) { acc3 = GPU_C / GPU_CV; printf(" %f | %f |\n", GPU_CV, acc3); } else printf(" %f | - |\n", GPU_CV); imwrite("res_GPU_CV.jpg", imgOut_4); } else printf(" - | - |\n"); free(imgOut_1); free(imgOut_3); return 0; }
6b8dbea4d7bcbdc36fd2703f7936afa7ccfaad1a.cu
#include <cv.h> #include <highgui.h> #include <time.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math_functions.h> #include "opencv2/gpu/gpu.hpp" #define MASK_SIZE 9 #define MASK_WIDTH 3 #define TILE_WIDTH 32 using namespace cv; // Convolution matrix on constant memory __constant__ char d_M[MASK_SIZE]; // Parallel Code on GPU using Constant Mem for matrix convol (CUDA) __global__ void imgConvGPU(unsigned char* imgIn, int row, int col, unsigned char* imgOut) { unsigned int row_d = blockIdx.y*blockDim.y+threadIdx.y; unsigned int col_d = blockIdx.x*blockDim.x+threadIdx.x; int start_r = row_d - (MASK_WIDTH/2); int start_c = col_d - (MASK_WIDTH/2); int Pixel = 0; for (int k = 0; k < MASK_WIDTH; ++k) { for (int l = 0; l < MASK_WIDTH; ++l) { if((k + start_r) >= 0 && (k + start_r) < row && (l + start_c) >= 0 && (l + start_c) < col) Pixel += imgIn[(k + start_r) * col + (l + start_c)] * d_M[k * MASK_WIDTH + l]; } } Pixel = Pixel < 0 ? 0 : Pixel > 255 ? 255 : Pixel; if (row_d < row && col_d < col) imgOut[row_d * col + col_d] = (unsigned char)Pixel; } // Parallel Code on GPU using shared Mem (CUDA) __global__ void imgConvGPU_sharedMem(unsigned char* imgIn, int row, int col, unsigned char* imgOut) { int dest, destX, destY, src, srcX, srcY; const int size_T = TILE_WIDTH + TILE_WIDTH - 1; unsigned int row_d = blockIdx.y*blockDim.y+threadIdx.y; unsigned int col_d = blockIdx.x*blockDim.x+threadIdx.x; __shared__ char d_T[size_T][size_T]; int n = MASK_WIDTH/2; dest = threadIdx.y * TILE_WIDTH + threadIdx.x; destY = dest / size_T; destX = dest % size_T; srcY = blockIdx.y * TILE_WIDTH + destY - n; srcX = blockIdx.x * TILE_WIDTH + destX - n; src = srcY * col + srcX; if (srcY >= 0 && srcY < row && srcX >= 0 && srcX < col) d_T[destY][destX] = imgIn[src]; else d_T[destY][destX] = 0; dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH; destY = dest / size_T; destX = dest % size_T; srcY = blockIdx.y * TILE_WIDTH + destY - n; srcX = blockIdx.x * TILE_WIDTH + destX - n; src = srcY * col + srcX; if (destY < size_T) if (srcY >= 0 && srcY < row && srcX >= 0 && srcX < col) d_T[destY][destX] = imgIn[src]; else d_T[destY][destX] = 0; __syncthreads(); int Pixel = 0; for (int k = 0; k < MASK_WIDTH; ++k) { for (int l = 0; l < MASK_WIDTH; ++l) { Pixel += d_T[threadIdx.y + k][threadIdx.x + l] * d_M[k * MASK_WIDTH + l]; } } Pixel = Pixel < 0 ? 0 : Pixel > 255 ? 
255 : Pixel; if (row_d < row && col_d < col) imgOut[row_d * col + col_d] = (unsigned char)Pixel; } void checkError(cudaError_t error, std::string type) { if(error != cudaSuccess){ printf("Error in %s, %s\n", type.c_str(), cudaGetErrorString(error)); exit(0); } } void cuda_const(unsigned char* imgIn, int row, int col, unsigned int maskWidth, unsigned char* imgOut, char* M, int size, double& time) { int size_M = sizeof(char)*MASK_SIZE; cudaError_t error = cudaSuccess; unsigned char *d_dataRawImage, *d_imageOutput; error = cudaMalloc((void**)&d_dataRawImage,size); checkError(error, "cudaMalloc for d_dataRawImage (w/ const mem)"); error = cudaMalloc((void**)&d_imageOutput,size); checkError(error, "cudaMalloc for d_imageOutput (w/ const mem)"); /*******************************GPU********************************/ clock_t tic = clock(); error = cudaMemcpy(d_dataRawImage,imgIn,size,cudaMemcpyHostToDevice); checkError(error, "cudaMemcpy for d_dataRawImage (w/ const mem)"); error = cudaMemcpyToSymbol(d_M, M, size_M); checkError(error, "cudaMemcpyToSymbol for d_M (w/ const mem)"); dim3 dimBlock(32,32,1); dim3 dimGrid(ceil(col/float(dimBlock.x)),ceil(row/float(dimBlock.y)),1); imgConvGPU<<<dimGrid,dimBlock>>>(d_dataRawImage, row, col, d_imageOutput); cudaDeviceSynchronize(); error = cudaMemcpy(imgOut,d_imageOutput,size,cudaMemcpyDeviceToHost); checkError(error, "cudaMemcpy for imgOut (w/ const mem)"); clock_t toc = clock(); time = (double)(toc - tic) / CLOCKS_PER_SEC; /*****************************GPU END******************************/ cudaFree(d_dataRawImage); cudaFree(d_imageOutput); } void cuda_sm(unsigned char* imgIn, int row, int col, unsigned int maskWidth, unsigned char* imgOut, char* M, int size, double& time) { int size_M = sizeof(char)*MASK_SIZE; cudaError_t error = cudaSuccess; unsigned char *d_dataRawImage, *d_imageOutput; error = cudaMalloc((void**)&d_dataRawImage,size); checkError(error, "cudaMalloc for d_dataRawImage (w/ const and shared mem))"); error = cudaMalloc((void**)&d_imageOutput,size); checkError(error, "cudaMalloc for d_imageOutput (w/ const and shared mem)"); /*******************************GPU********************************/ clock_t tic = clock(); error = cudaMemcpy(d_dataRawImage,imgIn,size,cudaMemcpyHostToDevice); checkError(error, "cudaMemcpy for d_dataRawImage (w/ const and shared mem)"); error = cudaMemcpyToSymbol(d_M, M, size_M); checkError(error, "cudaMemcpyToSymbol for d_M (w/ const and shared mem)"); dim3 dimBlock(TILE_WIDTH,TILE_WIDTH); dim3 dimGrid(ceil(col/float(dimBlock.x)),ceil(row/float(dimBlock.y))); imgConvGPU_sharedMem<<<dimGrid,dimBlock>>>(d_dataRawImage, row, col, d_imageOutput); cudaDeviceSynchronize(); error = cudaMemcpy(imgOut,d_imageOutput,size,cudaMemcpyDeviceToHost); checkError(error, "cudaMemcpy for imgOut (w/ const and shared mem)"); clock_t toc = clock(); time = (double)(toc - tic) / CLOCKS_PER_SEC; /*****************************GPU END******************************/ cudaFree(d_dataRawImage); cudaFree(d_imageOutput); } int main(int argc, char** argv) { char M[] = {-1,0,1,-2,0,2,-1,0,1}; unsigned int maskWidth = 3; /* imgIn: Original img (Gray scaled) imgOut_1: Parallel w/ constant mem imgOut_2: Parallel w/ constant and shared mem imgOut_3: imgOut_4: */ unsigned char *imgIn, *imgOut_1, *imgOut_2, *imgOut_3; double GPU_C, GPU_CS, GPU, GPU_CV, acc1, acc2, acc3; GPU_C = GPU_CS = GPU = GPU_CV = acc1 = acc2 = acc3 = 0.0; // Meaning of positions: {GPU_C, GPU_CS, GPU, GPU_CV} bool op[] = {false, false, false, false}; if(argc < 2) { printf("No image name 
given\n"); return -1; } char* imageName = argv[1]; for (int i = 2; i < argc; i++) { std::string s = argv[i]; if (s == "cconst") op[0] = true; else if (s == "csha") op[1] = true; else if (s == "pd") op[2] = true; else if (s == "sobel_d") op[3] = true; } Mat image; image = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE); // Get image dimension Size s = image.size(); int col = s.width; int row = s.height; int size = sizeof(unsigned char)*row*col; int sizeGray = sizeof(unsigned char)*row*col; imgIn = (unsigned char*)malloc(size); imgOut_1 = (unsigned char*)malloc(sizeGray); imgOut_2 = (unsigned char*)malloc(sizeGray); imgOut_3 = (unsigned char*)malloc(sizeGray); imgIn = image.data; Mat result, imgOut_4; imgOut_4.create(row,col,CV_8UC1); if (op[0]) cuda_const(imgIn, row, col, maskWidth, imgOut_1, M, sizeGray, GPU_C); if (op[1]) cuda_sm(imgIn, row, col, maskWidth, imgOut_2, M, sizeGray, GPU_CS); // if (op[2]) parallel_device(imgIn, row, col, maskWidth, imgOut_3, M, sizeGray, GPU); // if (op[3]) sobel_device(image, imgOut_4, GPU_CV); result.create(row,col,CV_8UC1); if (op[0]) { printf(" %f |", GPU_C); result.data = imgOut_1; imwrite("res_GPU_C.jpg", result); } else printf(" - |"); if (op[1]) { if (op[0]) { acc1 = GPU_C / GPU_CS; printf(" %f | %f |", GPU_CS, acc1); } else printf(" %f | - |", GPU_CS); result.data = imgOut_2; imwrite("res_GPU_CS.jpg", result); } else printf(" - | - |"); if (op[2]) { if (op[0]) { acc2 = GPU_C / GPU; printf(" %f | %f |", GPU, acc2); } else printf(" %f | - |", GPU); result.data = imgOut_3; imwrite("res_GPU.jpg", result); } else printf(" - | - |"); if (op[3]) { if (op[0]) { acc3 = GPU_C / GPU_CV; printf(" %f | %f |\n", GPU_CV, acc3); } else printf(" %f | - |\n", GPU_CV); imwrite("res_GPU_CV.jpg", imgOut_4); } else printf(" - | - |\n"); free(imgOut_1); free(imgOut_3); return 0; }
df6bee3f1070d9bbf9435212ffab8fdd2d85525c.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include "caffe2/core/THCCachingAllocator_gpu.h" #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; std::unique_ptr<THCCachingAllocator> g_thc_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), CAFFE2_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", CAFFE2_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile the caffe binary."); for (int i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( c10::FLAGS_caffe2_cub_bin_growth, c10::FLAGS_caffe2_cub_min_bin, c10::FLAGS_caffe2_cub_max_bin, size_t(c10::FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, c10::FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (c10::FLAGS_caffe2_cuda_memory_pool == "" || c10::FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (c10::FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (c10::FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. 
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (c10::FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; g_thc_allocator.reset(new THCCachingAllocator()); } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", c10::FLAGS_caffe2_cuda_memory_pool); } } static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline int RectifyGPUID(const int gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(const int gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( c10::FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( c10::FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > c10::FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (c10::FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA)}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA)}; case CudaMemoryPoolType::THC: CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA)}; } return {nullptr, nullptr, &Delete, at::Device(CUDA)}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (c10::FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { CUDA_ENFORCE(g_thc_allocator->Free(ptr)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; at::Allocator* GetCUDAAllocator() { return GetAllocator(CUDA); } static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); BaseStaticContext* GetCUDAStaticContext() { static CUDAStaticContext context; return &context; } REGISTER_STATIC_CONTEXT(CUDA, GetCUDAStaticContext()); } // namespace caffe2
df6bee3f1070d9bbf9435212ffab8fdd2d85525c.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include "caffe2/core/THCCachingAllocator_gpu.h" #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; std::unique_ptr<THCCachingAllocator> g_thc_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is garded by the CUDAContext::mutex. 
static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), CAFFE2_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", CAFFE2_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile the caffe binary."); for (int i = 0; i < NumCudaDevices(); ++i) { DeviceGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0)); } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( c10::FLAGS_caffe2_cub_bin_growth, c10::FLAGS_caffe2_cub_min_bin, c10::FLAGS_caffe2_cub_max_bin, size_t(c10::FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, c10::FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (c10::FLAGS_caffe2_cuda_memory_pool == "" || c10::FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (c10::FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (c10::FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. 
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (c10::FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; g_thc_allocator.reset(new THCCachingAllocator()); } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", c10::FLAGS_caffe2_cuda_memory_pool); } } static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; SetCPUAllocator(&g_pinned_cpu_alloc); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline int RectifyGPUID(const int gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(const int gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? 
option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( c10::FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( c10::FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > c10::FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (c10::FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA)}; case CudaMemoryPoolType::CUB: CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA)}; case CudaMemoryPoolType::THC: CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA)}; } return {nullptr, nullptr, &Delete, at::Device(CUDA)}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (c10::FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { CUDA_ENFORCE(g_thc_allocator->Free(ptr)); if (c10::FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; at::Allocator* GetCUDAAllocator() { return GetAllocator(CUDA); } static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); BaseStaticContext* GetCUDAStaticContext() { static CUDAStaticContext context; return &context; } REGISTER_STATIC_CONTEXT(CUDA, GetCUDAStaticContext()); } // namespace caffe2
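// Illustrative sketch (not part of the Caffe2 file above): a standalone use of
// the cub caching allocator that the flags above configure. With the defaults
// shown (bin_growth=8, min_bin=3, max_bin=10, max managed 10*1024 MB), the
// cached bin sizes grow geometrically from roughly 8^3 = 512 bytes up to
// 8^10 = 1 GiB. The function and variable names below are made up for the
// example; only the cub API calls mirror the file above.
#include <cub/util_allocator.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  cub::CachingDeviceAllocator allocator(
      /*bin_growth=*/8, /*min_bin=*/3, /*max_bin=*/10,
      /*max_cached_bytes=*/size_t(10 * 1024) * 1024L * 1024L,
      /*skip_cleanup=*/false, /*debug=*/false);
  void* ptr = nullptr;
  // Requests are rounded up to the nearest bin and, when possible, served from
  // previously cached blocks, which is what makes repeated alloc/free cheap.
  if (allocator.DeviceAllocate(&ptr, 1024) == cudaSuccess) {
    printf("allocated 1024 bytes at %p\n", ptr);
    allocator.DeviceFree(ptr);
  }
  return 0;
}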
2acd41d9b5f18f8efd5bba6a906ca53357229885.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <THH/THH.h> extern THCState *state; const int CUDA_WARP_SIZE = 32; const int CUDA_GRID_SIZE = 56; // #SM*2 const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } enum class ScaleType { SINGLE_SCALE, PER_WEIGHT_CHANNEL, PER_ACTIVATION_CHANNEL }; ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range) { AT_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality"); int64_t scale_dim = input_range.dim(); for (int i = 0; i < scale_dim; i++) { AT_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes"); } int64_t scale_count = input_range.numel(); if (scale_dim > 0) { // For (NxCxHxW) input/output tensors, it is assumed that input_range is // either (1) for single-scale quantization, or (Nx1x1x1) for // per-channel scale weights quantization, or (1xCx1x1) for per-channel // activation quantization if (input_range.size(0) > 1) { AT_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different"); AT_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat"); return ScaleType::PER_WEIGHT_CHANNEL; } else if (scale_dim >= 2 and input_range.size(1) > 1) { AT_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different"); AT_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat"); return ScaleType::PER_ACTIVATION_CHANNEL; } } return ScaleType::SINGLE_SCALE; } namespace { template <typename scalar_t> __device__ void fakeQuantize( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels ) { scalar_t s = (levels - 1) / (*input_range); (*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low); } template <typename scalar_t> __global__ void q_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const int64_t size, const int64_t contiguous_elements_per_scale, const int64_t scale_count) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { // "Scales" are derived from input_low/input_range int64_t scale_idx = static_cast<int64_t>(idx / contiguous_elements_per_scale) % scale_count; fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels); } } template <typename scalar_t> __device__ void calcGrad( scalar_t* __restrict__ val_grad_input, scalar_t* __restrict__ val_grad_input_low, scalar_t* __restrict__ val_grad_input_range, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ output, const scalar_t range_low, const scalar_t range_high, const scalar_t reverted_range, const scalar_t val_low_grad) { *val_grad_input_range = 0; *val_grad_input_low = 0; *val_grad_input = 0; if ((*input) < range_low) { (*val_grad_input_range) = val_low_grad * (*grad_output); (*val_grad_input_low) = (*grad_output); } else if ((*input) > range_high) { (*val_grad_input_range) = (*grad_output); (*val_grad_input_low) = 
(*grad_output); } else { (*val_grad_input_range) = (*grad_output) * (((*output) - (*input)) * reverted_range); (*val_grad_input) = (*grad_output); } } __device__ bool last_block(int* counter) { __threadfence(); int last = 0; if (threadIdx.x == 0) { last = atomicAdd(counter, 1); } return __syncthreads_or(last == gridDim.x - 1); } // support only warp size = 32 template <typename scalar_t> __device__ void sum_warp(volatile scalar_t* sharr) { int tidx = threadIdx.x & 31; if (tidx < 16) { sharr[tidx] += sharr[tidx + 16]; sharr[tidx] += sharr[tidx + 8]; sharr[tidx] += sharr[tidx + 4]; sharr[tidx] += sharr[tidx + 2]; sharr[tidx] += sharr[tidx + 1]; } } template <typename scalar_t> __device__ void sumGrad( scalar_t* __restrict__ sh_grad, scalar_t sum, const int tidx, const int bidx, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, scalar_t* __restrict__ grad) { sh_grad[tidx] = sum; __syncthreads(); sum_warp(sh_grad + (tidx & ~(CUDA_WARP_SIZE - 1))); __syncthreads(); if (tidx < CUDA_WARP_SIZE) { sh_grad[tidx] = tidx * CUDA_WARP_SIZE < CUDA_NUM_THREADS ? sh_grad[tidx * CUDA_WARP_SIZE] : 0; sum_warp(sh_grad); if (tidx == 0) { dev_tmp[bidx] = sh_grad[0]; } } if (last_block(dev_last_block_counter)) { sh_grad[tidx] = tidx < gridDim.x ? dev_tmp[tidx] : 0; __syncthreads(); sum_warp(sh_grad + (tidx & ~(CUDA_WARP_SIZE - 1))); __syncthreads(); if (tidx < CUDA_WARP_SIZE) { sh_grad[tidx] = tidx * CUDA_WARP_SIZE < CUDA_NUM_THREADS ? sh_grad[tidx * CUDA_WARP_SIZE] : 0; sum_warp(sh_grad); if (tidx == 0) { grad[0] = sh_grad[0]; } } } } template <typename scalar_t> __global__ void q_scale_per_activation_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int* __restrict__ dev_last_block_counter_range, int* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t total_elements_per_scale, const int64_t contiguous_elements_per_scale, const int64_t scale_count, const int64_t channel_offset) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int gtidx = bidx * CUDA_NUM_THREADS + tidx; const int grid_size = CUDA_NUM_THREADS * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low) + (*input_range) * alpha; scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (int i = gtidx; i < total_elements_per_scale; i += grid_size) { // i is the global thread index - need to calculate the input array index // that belongs to a specific scale index from i. 
Will do this by treating i // as the index in a non-existing array where input values belonging to a single // scale have a contiguous block layout, but will recalculate actual index into the // input/output array based on the fact that the values belonging to a single scale // in reality have interleaved block layout, with a spacing between the blocks // equal to channel_offset int actual_idx = (i / contiguous_elements_per_scale) * channel_offset + (i % contiguous_elements_per_scale); fakeQuantize<scalar_t>(&output, (input + actual_idx), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + actual_idx), &val_grad_input_low, &val_grad_input_range, (grad_output + actual_idx), (input + actual_idx), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_NUM_THREADS]; __shared__ scalar_t sh_grad_low[CUDA_NUM_THREADS]; sumGrad<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range); sumGrad<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low); } template <typename scalar_t> __global__ void q_single_scale_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int* __restrict__ dev_last_block_counter_range, int* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t size) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int gtidx = bidx * CUDA_NUM_THREADS + tidx; const int grid_size = CUDA_NUM_THREADS * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (int i = gtidx; i < size; i += grid_size) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_NUM_THREADS]; __shared__ scalar_t sh_grad_low[CUDA_NUM_THREADS]; sumGrad<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range); sumGrad<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low); } } at::Tensor q_cuda_forward( at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels) { const auto quantized_elements_count = input.numel(); ScaleType scale_type = get_scale_type(input, input_low, input_range); int64_t contiguous_elements_per_scale = 0; int64_t scale_count = input_range.numel(); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: // Scale count should be equal to 1-st input tensor dimension contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count); break; case ScaleType::PER_WEIGHT_CHANNEL: // Scale count should be equal to 0-th input 
tensor dimension contiguous_elements_per_scale = quantized_elements_count / scale_count; break; default: contiguous_elements_per_scale = quantized_elements_count; break; } auto output = at::empty_like(input); AT_DISPATCH_FLOATING_TYPES(input.type(), "q_cuda_forward", ([&] { hipLaunchKernelGGL(( q_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(quantized_elements_count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output.data<scalar_t>(), input.data<scalar_t>(), input_low.data<scalar_t>(), input_range.data<scalar_t>(), levels, quantized_elements_count, contiguous_elements_per_scale, scale_count); })); return output; } std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto scale_count = input_range.size(1); const auto total_elements_per_scale = input.numel() / scale_count; const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0)); const auto channel_offset = input.numel() / input.size(0); auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); auto grid_size = ::min(GET_BLOCKS(total_elements_per_scale), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Reusing the single scale backward kernel for now, since in this case the // elements that correspond to a single scale value are laid out in memory // as contiguous blocks. 
for (int64_t scale_idx = 0; scale_idx < scale_count; scale_idx++) { auto init_element_offset = contiguous_elements_per_scale * scale_idx; AT_DISPATCH_FLOATING_TYPES(input.type(), "q_scale_per_activation_channel_cuda_backward", ([&] { hipLaunchKernelGGL(( q_scale_per_activation_channel_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), grad_input.data<scalar_t>() + init_element_offset, grad_input_low.data<scalar_t>() + scale_idx, grad_input_range.data<scalar_t>() + scale_idx, dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, input_low.data<scalar_t>() + scale_idx, input_range.data<scalar_t>() + scale_idx, levels, level_low, level_high, total_elements_per_scale, contiguous_elements_per_scale, scale_count, channel_offset); })); dev_tmp_range.fill_(0.0); dev_tmp_low.fill_(0.0); dev_last_block_counter_low.fill_(0); dev_last_block_counter_range.fill_(0); } return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto scale_count = input_range.size(0); const auto elements_per_scale = input.numel() / scale_count; auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); auto grid_size = ::min(GET_BLOCKS(elements_per_scale), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Reusing the single scale backward kernel for now, since in this case the // elements that correspond to a single scale value are laid out in memory // as contiguous blocks. 
for (int64_t scale_idx = 0; scale_idx < scale_count; scale_idx++) { auto init_element_offset = elements_per_scale * scale_idx; AT_DISPATCH_FLOATING_TYPES(input.type(), "q_single_scale_cuda_backward", ([&] { hipLaunchKernelGGL(( q_single_scale_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), grad_input.data<scalar_t>() + init_element_offset, grad_input_low.data<scalar_t>() + scale_idx, grad_input_range.data<scalar_t>() + scale_idx, dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, input_low.data<scalar_t>() + scale_idx, input_range.data<scalar_t>() + scale_idx, levels, level_low, level_high, elements_per_scale); })); dev_tmp_range.fill_(0.0); dev_tmp_low.fill_(0.0); dev_last_block_counter_low.fill_(0); dev_last_block_counter_range.fill_(0); } return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto size = input.numel(); auto grad_input = at::empty_like(grad_output); auto grad_input_range = at::empty({1}, grad_output.options()); auto grad_input_low = at::empty({1}, grad_output.options()); auto grid_size = ::min(GET_BLOCKS(size), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); AT_DISPATCH_FLOATING_TYPES(input.type(), "q_single_scale_cuda_backward", ([&] { hipLaunchKernelGGL(( q_single_scale_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), grad_input.data<scalar_t>(), grad_input_low.data<scalar_t>(), grad_input_range.data<scalar_t>(), dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>(), input.data<scalar_t>(), input_low.data<scalar_t>(), input_range.data<scalar_t>(), levels, level_low, level_high, size); })); return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { ScaleType scale_type = get_scale_type(input, input_low, input_range); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: return q_scale_per_activation_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::PER_WEIGHT_CHANNEL: return q_scale_per_weight_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::SINGLE_SCALE: default: return q_single_scale_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); }; }
2acd41d9b5f18f8efd5bba6a906ca53357229885.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <THC/THC.h> extern THCState *state; const int CUDA_WARP_SIZE = 32; const int CUDA_GRID_SIZE = 56; // #SM*2 const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } enum class ScaleType { SINGLE_SCALE, PER_WEIGHT_CHANNEL, PER_ACTIVATION_CHANNEL }; ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range) { AT_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality"); int64_t scale_dim = input_range.dim(); for (int i = 0; i < scale_dim; i++) { AT_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes"); } int64_t scale_count = input_range.numel(); if (scale_dim > 0) { // For (NxCxHxW) input/output tensors, it is assumed that input_range is // either (1) for single-scale quantization, or (Nx1x1x1) for // per-channel scale weights quantization, or (1xCx1x1) for per-channel // activation quantization if (input_range.size(0) > 1) { AT_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different"); AT_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat"); return ScaleType::PER_WEIGHT_CHANNEL; } else if (scale_dim >= 2 and input_range.size(1) > 1) { AT_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different"); AT_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat"); return ScaleType::PER_ACTIVATION_CHANNEL; } } return ScaleType::SINGLE_SCALE; } namespace { template <typename scalar_t> __device__ void fakeQuantize( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels ) { scalar_t s = (levels - 1) / (*input_range); (*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low); } template <typename scalar_t> __global__ void q_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const int64_t size, const int64_t contiguous_elements_per_scale, const int64_t scale_count) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { // "Scales" are derived from input_low/input_range int64_t scale_idx = static_cast<int64_t>(idx / contiguous_elements_per_scale) % scale_count; fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels); } } template <typename scalar_t> __device__ void calcGrad( scalar_t* __restrict__ val_grad_input, scalar_t* __restrict__ val_grad_input_low, scalar_t* __restrict__ val_grad_input_range, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ output, const scalar_t range_low, const scalar_t range_high, const scalar_t reverted_range, const scalar_t val_low_grad) { *val_grad_input_range = 0; *val_grad_input_low = 0; *val_grad_input = 0; if ((*input) < range_low) { (*val_grad_input_range) = val_low_grad * (*grad_output); (*val_grad_input_low) = (*grad_output); } else if ((*input) > range_high) { (*val_grad_input_range) = (*grad_output); (*val_grad_input_low) = (*grad_output); } else { (*val_grad_input_range) = (*grad_output) * 
(((*output) - (*input)) * reverted_range); (*val_grad_input) = (*grad_output); } } __device__ bool last_block(int* counter) { __threadfence(); int last = 0; if (threadIdx.x == 0) { last = atomicAdd(counter, 1); } return __syncthreads_or(last == gridDim.x - 1); } // support only warp size = 32 template <typename scalar_t> __device__ void sum_warp(volatile scalar_t* sharr) { int tidx = threadIdx.x & 31; if (tidx < 16) { sharr[tidx] += sharr[tidx + 16]; sharr[tidx] += sharr[tidx + 8]; sharr[tidx] += sharr[tidx + 4]; sharr[tidx] += sharr[tidx + 2]; sharr[tidx] += sharr[tidx + 1]; } } template <typename scalar_t> __device__ void sumGrad( scalar_t* __restrict__ sh_grad, scalar_t sum, const int tidx, const int bidx, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, scalar_t* __restrict__ grad) { sh_grad[tidx] = sum; __syncthreads(); sum_warp(sh_grad + (tidx & ~(CUDA_WARP_SIZE - 1))); __syncthreads(); if (tidx < CUDA_WARP_SIZE) { sh_grad[tidx] = tidx * CUDA_WARP_SIZE < CUDA_NUM_THREADS ? sh_grad[tidx * CUDA_WARP_SIZE] : 0; sum_warp(sh_grad); if (tidx == 0) { dev_tmp[bidx] = sh_grad[0]; } } if (last_block(dev_last_block_counter)) { sh_grad[tidx] = tidx < gridDim.x ? dev_tmp[tidx] : 0; __syncthreads(); sum_warp(sh_grad + (tidx & ~(CUDA_WARP_SIZE - 1))); __syncthreads(); if (tidx < CUDA_WARP_SIZE) { sh_grad[tidx] = tidx * CUDA_WARP_SIZE < CUDA_NUM_THREADS ? sh_grad[tidx * CUDA_WARP_SIZE] : 0; sum_warp(sh_grad); if (tidx == 0) { grad[0] = sh_grad[0]; } } } } template <typename scalar_t> __global__ void q_scale_per_activation_channel_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int* __restrict__ dev_last_block_counter_range, int* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t total_elements_per_scale, const int64_t contiguous_elements_per_scale, const int64_t scale_count, const int64_t channel_offset) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int gtidx = bidx * CUDA_NUM_THREADS + tidx; const int grid_size = CUDA_NUM_THREADS * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low) + (*input_range) * alpha; scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (int i = gtidx; i < total_elements_per_scale; i += grid_size) { // i is the global thread index - need to calculate the input array index // that belongs to a specific scale index from i. 
Will do this by treating i // as the index in a non-existing array where input values belonging to a single // scale have a contiguous block layout, but will recalculate actual index into the // input/output array based on the fact that the values belonging to a single scale // in reality have interleaved block layout, with a spacing between the blocks // equal to channel_offset int actual_idx = (i / contiguous_elements_per_scale) * channel_offset + (i % contiguous_elements_per_scale); fakeQuantize<scalar_t>(&output, (input + actual_idx), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + actual_idx), &val_grad_input_low, &val_grad_input_range, (grad_output + actual_idx), (input + actual_idx), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_NUM_THREADS]; __shared__ scalar_t sh_grad_low[CUDA_NUM_THREADS]; sumGrad<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range); sumGrad<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low); } template <typename scalar_t> __global__ void q_single_scale_cuda_backward_kernel( scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_input_low, scalar_t* __restrict__ grad_input_range, scalar_t* __restrict__ dev_tmp_range, scalar_t* __restrict__ dev_tmp_low, int* __restrict__ dev_last_block_counter_range, int* __restrict__ dev_last_block_counter_low, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ input_low, const scalar_t* __restrict__ input_range, const scalar_t levels, const scalar_t level_low, const scalar_t level_high, const int64_t size) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int gtidx = bidx * CUDA_NUM_THREADS + tidx; const int grid_size = CUDA_NUM_THREADS * gridDim.x; scalar_t sum_range = 0, sum_low = 0; scalar_t output, val_grad_input_range, val_grad_input_low; scalar_t alpha = level_low / level_high; scalar_t range_low = (*input_low); scalar_t range_high = (*input_low) + (*input_range); scalar_t reverted_range = 1 / (*input_range); for (int i = gtidx; i < size; i += grid_size) { fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels); calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i), (input + i), &output, range_low, range_high, reverted_range, alpha); sum_range += val_grad_input_range; sum_low += val_grad_input_low; } __shared__ scalar_t sh_grad_range[CUDA_NUM_THREADS]; __shared__ scalar_t sh_grad_low[CUDA_NUM_THREADS]; sumGrad<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range); sumGrad<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low); } } at::Tensor q_cuda_forward( at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels) { const auto quantized_elements_count = input.numel(); ScaleType scale_type = get_scale_type(input, input_low, input_range); int64_t contiguous_elements_per_scale = 0; int64_t scale_count = input_range.numel(); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: // Scale count should be equal to 1-st input tensor dimension contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count); break; case ScaleType::PER_WEIGHT_CHANNEL: // Scale count should be equal to 0-th input 
tensor dimension contiguous_elements_per_scale = quantized_elements_count / scale_count; break; default: contiguous_elements_per_scale = quantized_elements_count; break; } auto output = at::empty_like(input); AT_DISPATCH_FLOATING_TYPES(input.type(), "q_cuda_forward", ([&] { q_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(quantized_elements_count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output.data<scalar_t>(), input.data<scalar_t>(), input_low.data<scalar_t>(), input_range.data<scalar_t>(), levels, quantized_elements_count, contiguous_elements_per_scale, scale_count); })); return output; } std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto scale_count = input_range.size(1); const auto total_elements_per_scale = input.numel() / scale_count; const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0)); const auto channel_offset = input.numel() / input.size(0); auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); auto grid_size = std::min(GET_BLOCKS(total_elements_per_scale), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Reusing the single scale backward kernel for now, since in this case the // elements that correspond to a single scale value are laid out in memory // as contiguous blocks. 
for (int64_t scale_idx = 0; scale_idx < scale_count; scale_idx++) { auto init_element_offset = contiguous_elements_per_scale * scale_idx; AT_DISPATCH_FLOATING_TYPES(input.type(), "q_scale_per_activation_channel_cuda_backward", ([&] { q_scale_per_activation_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( grad_input.data<scalar_t>() + init_element_offset, grad_input_low.data<scalar_t>() + scale_idx, grad_input_range.data<scalar_t>() + scale_idx, dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, input_low.data<scalar_t>() + scale_idx, input_range.data<scalar_t>() + scale_idx, levels, level_low, level_high, total_elements_per_scale, contiguous_elements_per_scale, scale_count, channel_offset); })); dev_tmp_range.fill_(0.0); dev_tmp_low.fill_(0.0); dev_last_block_counter_low.fill_(0); dev_last_block_counter_range.fill_(0); } return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto scale_count = input_range.size(0); const auto elements_per_scale = input.numel() / scale_count; auto grad_input = at::empty_like(grad_output); auto grad_input_low = at::empty(input_range.sizes(), grad_output.options()); auto grad_input_range = at::empty(input_range.sizes(), grad_output.options()); auto grid_size = std::min(GET_BLOCKS(elements_per_scale), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Reusing the single scale backward kernel for now, since in this case the // elements that correspond to a single scale value are laid out in memory // as contiguous blocks. 
for (int64_t scale_idx = 0; scale_idx < scale_count; scale_idx++) { auto init_element_offset = elements_per_scale * scale_idx; AT_DISPATCH_FLOATING_TYPES(input.type(), "q_single_scale_cuda_backward", ([&] { q_single_scale_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( grad_input.data<scalar_t>() + init_element_offset, grad_input_low.data<scalar_t>() + scale_idx, grad_input_range.data<scalar_t>() + scale_idx, dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, input_low.data<scalar_t>() + scale_idx, input_range.data<scalar_t>() + scale_idx, levels, level_low, level_high, elements_per_scale); })); dev_tmp_range.fill_(0.0); dev_tmp_low.fill_(0.0); dev_last_block_counter_low.fill_(0); dev_last_block_counter_range.fill_(0); } return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { const auto size = input.numel(); auto grad_input = at::empty_like(grad_output); auto grad_input_range = at::empty({1}, grad_output.options()); auto grad_input_low = at::empty({1}, grad_output.options()); auto grid_size = std::min(GET_BLOCKS(size), CUDA_GRID_SIZE); auto dev_tmp_range = at::empty({grid_size}, grad_output.options()); auto dev_tmp_low = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); AT_DISPATCH_FLOATING_TYPES(input.type(), "q_single_scale_cuda_backward", ([&] { q_single_scale_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( grad_input.data<scalar_t>(), grad_input_low.data<scalar_t>(), grad_input_range.data<scalar_t>(), dev_tmp_range.data<scalar_t>(), dev_tmp_low.data<scalar_t>(), dev_last_block_counter_range.data<int>(), dev_last_block_counter_low.data<int>(), grad_output.data<scalar_t>(), input.data<scalar_t>(), input_low.data<scalar_t>(), input_range.data<scalar_t>(), levels, level_low, level_high, size); })); return {grad_input, grad_input_low, grad_input_range}; } std::vector<at::Tensor> q_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor input_low, at::Tensor input_range, int levels, int level_low, int level_high) { ScaleType scale_type = get_scale_type(input, input_low, input_range); switch (scale_type) { case ScaleType::PER_ACTIVATION_CHANNEL: return q_scale_per_activation_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::PER_WEIGHT_CHANNEL: return q_scale_per_weight_channel_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); case ScaleType::SINGLE_SCALE: default: return q_single_scale_cuda_backward( grad_output, input, input_low, input_range, levels, level_low, level_high); }; }
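// Illustrative host-side reference (not part of the quantization extension
// above) of what the fakeQuantize() device helper computes: clamp the input
// into [input_low, input_low + input_range], snap it onto one of `levels`
// evenly spaced grid points, then shift back by input_low. The function name
// is made up; the arithmetic mirrors the device code line for line.
#include <algorithm>
#include <cmath>

inline float fake_quantize_reference(float x, float input_low,
                                     float input_range, int levels) {
  float s = (levels - 1) / input_range;  // quantization scale
  float clamped = std::min(std::max(x, input_low), input_low + input_range);
  return std::round((clamped - input_low) * s) / s + input_low;
}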
b12d3a52d7116be2132561f4178feb09da0b8afb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/random_routing_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32

const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

using LoDTensor = framework::LoDTensor;
using Tensor = phi::DenseTensor;

template <typename T>
__global__ void random_routing_kernel(int64_t* data,
                                      const int64_t length,
                                      const size_t N,
                                      const size_t D,
                                      const T* prob,
                                      const int64_t* topk_idx,
                                      const T* topk_value) {
  CUDA_KERNEL_LOOP(idx, length) {
    size_t row = idx / D;
    size_t col = idx % D;
    if (col != 1) return;
    if (static_cast<T>(2) * topk_value[idx] < prob[row]) {
      data[idx] = static_cast<int64_t>(-1);
    }
  }
}

template <typename T>
class RandomRoutingOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto topk_idx = context.Input<LoDTensor>("TopK_Idx");
    auto topk_value = context.Input<LoDTensor>("TopK_Value");
    auto prob = context.Input<LoDTensor>("Prob");
    auto out = context.Output<LoDTensor>("Out");

    auto place = context.GetPlace();
    const auto& dev_ctx = context.template device_context<phi::GPUContext>();
    framework::TensorCopy(*topk_idx, place, out);

    size_t N = topk_idx->dims()[0];
    size_t D = topk_idx->dims()[1];

    int64_t num_idx = topk_idx->numel();

    auto prob_data = prob->data<T>();
    auto topk_value_data = topk_value->data<T>();
    auto topk_idx_data = topk_idx->data<int64_t>();
    auto out_data = out->data<int64_t>();

    hipLaunchKernelGGL(( random_routing_kernel<T>) ,
        dim3(GET_BLOCKS(num_idx)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
        out_data, num_idx, N, D, prob_data, topk_idx_data, topk_value_data);
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(random_routing,
                        ops::RandomRoutingOpCUDAKernel<float>,
                        ops::RandomRoutingOpCUDAKernel<double>,
                        ops::RandomRoutingOpCUDAKernel<plat::float16>);
b12d3a52d7116be2132561f4178feb09da0b8afb.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/random_routing_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32

const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

using LoDTensor = framework::LoDTensor;
using Tensor = phi::DenseTensor;

template <typename T>
__global__ void random_routing_kernel(int64_t* data,
                                      const int64_t length,
                                      const size_t N,
                                      const size_t D,
                                      const T* prob,
                                      const int64_t* topk_idx,
                                      const T* topk_value) {
  CUDA_KERNEL_LOOP(idx, length) {
    size_t row = idx / D;
    size_t col = idx % D;
    if (col != 1) return;
    if (static_cast<T>(2) * topk_value[idx] < prob[row]) {
      data[idx] = static_cast<int64_t>(-1);
    }
  }
}

template <typename T>
class RandomRoutingOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto topk_idx = context.Input<LoDTensor>("TopK_Idx");
    auto topk_value = context.Input<LoDTensor>("TopK_Value");
    auto prob = context.Input<LoDTensor>("Prob");
    auto out = context.Output<LoDTensor>("Out");

    auto place = context.GetPlace();
    const auto& dev_ctx = context.template device_context<phi::GPUContext>();
    framework::TensorCopy(*topk_idx, place, out);

    size_t N = topk_idx->dims()[0];
    size_t D = topk_idx->dims()[1];

    int64_t num_idx = topk_idx->numel();

    auto prob_data = prob->data<T>();
    auto topk_value_data = topk_value->data<T>();
    auto topk_idx_data = topk_idx->data<int64_t>();
    auto out_data = out->data<int64_t>();

    random_routing_kernel<T>
        <<<GET_BLOCKS(num_idx), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
            out_data, num_idx, N, D, prob_data, topk_idx_data, topk_value_data);
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(random_routing,
                        ops::RandomRoutingOpCUDAKernel<float>,
                        ops::RandomRoutingOpCUDAKernel<double>,
                        ops::RandomRoutingOpCUDAKernel<plat::float16>);
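// Beyond the hipify banner and the added "hip/hip_runtime.h" include, the only
// substantive difference between the .hip and .cu versions of this operator is
// the kernel launch spelling. A generic sketch of that mapping follows; the
// kernel and argument names here are made up for illustration and are not part
// of the PaddlePaddle code above.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, int n, float factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, int n, float factor, hipStream_t stream) {
  dim3 grid((n + 255) / 256), block(256);
  // CUDA form (as in the .cu file):
  //   scale_kernel<<<grid, block, 0, stream>>>(d_data, n, factor);
  // HIP form emitted by hipify (as in the .hip file):
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, n, factor);
}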
bcb7c28b3635fd30c795a72eac4e3e74ec438207.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <common/cudart_utils.h>
#include <cuda_utils.cuh>
#include <random/make_arima.cuh>
#include "test_utils.h"

namespace MLCommon {
namespace Random {

/* This test only proves that the generator runs without errors, not
 * correctness! */

struct MakeArimaInputs {
  int batch_size, n_obs;
  int p, d, q, P, D, Q, s, k;
  raft::random::GeneratorType gtype;
  uint64_t seed;
};

template <typename T>
class MakeArimaTest : public ::testing::TestWithParam<MakeArimaInputs> {
 protected:
  void SetUp() override {
    params = ::testing::TestWithParam<MakeArimaInputs>::GetParam();

    // Scales of the different random components
    T scale = 1.0, noise_scale = 0.2;
    T intercept_scale =
      params.d + params.D == 0 ? 1.0 : (params.d + params.D == 1 ? 0.2 : 0.01);

    ML::ARIMAOrder order = {params.p, params.d, params.q, params.P,
                            params.D, params.Q, params.s, params.k};

    allocator.reset(new raft::mr::device::default_allocator);
    CUDA_CHECK(hipStreamCreate(&stream));

    raft::allocate(data, params.batch_size * params.n_obs);

    // Create the time series dataset
    make_arima(data, params.batch_size, params.n_obs, order, allocator, stream,
               scale, noise_scale, intercept_scale, params.seed, params.gtype);
  }

  void TearDown() override {
    CUDA_CHECK(hipFree(data));
    CUDA_CHECK(hipStreamDestroy(stream));
  }

 protected:
  MakeArimaInputs params;
  T *data;
  std::shared_ptr<deviceAllocator> allocator;
  hipStream_t stream;
};

const std::vector<MakeArimaInputs> make_arima_inputs = {
  {100, 200, 1, 1, 2, 0, 0, 0, 0, 1, raft::random::GenPhilox, 1234ULL},
  {1000, 100, 3, 0, 0, 1, 1, 0, 4, 1, raft::random::GenPhilox, 1234ULL},
  {10000, 150, 2, 1, 2, 0, 1, 2, 4, 0, raft::random::GenPhilox, 1234ULL}};

typedef MakeArimaTest<float> MakeArimaTestF;
TEST_P(MakeArimaTestF, Result) { CUDA_CHECK(hipStreamSynchronize(stream)); }
INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestF,
                        ::testing::ValuesIn(make_arima_inputs));

typedef MakeArimaTest<double> MakeArimaTestD;
TEST_P(MakeArimaTestD, Result) { CUDA_CHECK(hipStreamSynchronize(stream)); }
INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestD,
                        ::testing::ValuesIn(make_arima_inputs));

} // end namespace Random
} // end namespace MLCommon
bcb7c28b3635fd30c795a72eac4e3e74ec438207.cu
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <common/cudart_utils.h>
#include <cuda_utils.cuh>
#include <random/make_arima.cuh>
#include "test_utils.h"

namespace MLCommon {
namespace Random {

/* This test only proves that the generator runs without errors, not
 * correctness! */

struct MakeArimaInputs {
  int batch_size, n_obs;
  int p, d, q, P, D, Q, s, k;
  raft::random::GeneratorType gtype;
  uint64_t seed;
};

template <typename T>
class MakeArimaTest : public ::testing::TestWithParam<MakeArimaInputs> {
 protected:
  void SetUp() override {
    params = ::testing::TestWithParam<MakeArimaInputs>::GetParam();

    // Scales of the different random components
    T scale = 1.0, noise_scale = 0.2;
    T intercept_scale =
      params.d + params.D == 0 ? 1.0 : (params.d + params.D == 1 ? 0.2 : 0.01);

    ML::ARIMAOrder order = {params.p, params.d, params.q, params.P,
                            params.D, params.Q, params.s, params.k};

    allocator.reset(new raft::mr::device::default_allocator);
    CUDA_CHECK(cudaStreamCreate(&stream));

    raft::allocate(data, params.batch_size * params.n_obs);

    // Create the time series dataset
    make_arima(data, params.batch_size, params.n_obs, order, allocator, stream,
               scale, noise_scale, intercept_scale, params.seed, params.gtype);
  }

  void TearDown() override {
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }

 protected:
  MakeArimaInputs params;
  T *data;
  std::shared_ptr<deviceAllocator> allocator;
  cudaStream_t stream;
};

const std::vector<MakeArimaInputs> make_arima_inputs = {
  {100, 200, 1, 1, 2, 0, 0, 0, 0, 1, raft::random::GenPhilox, 1234ULL},
  {1000, 100, 3, 0, 0, 1, 1, 0, 4, 1, raft::random::GenPhilox, 1234ULL},
  {10000, 150, 2, 1, 2, 0, 1, 2, 4, 0, raft::random::GenPhilox, 1234ULL}};

typedef MakeArimaTest<float> MakeArimaTestF;
TEST_P(MakeArimaTestF, Result) { CUDA_CHECK(cudaStreamSynchronize(stream)); }
INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestF,
                        ::testing::ValuesIn(make_arima_inputs));

typedef MakeArimaTest<double> MakeArimaTestD;
TEST_P(MakeArimaTestD, Result) { CUDA_CHECK(cudaStreamSynchronize(stream)); }
INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestD,
                        ::testing::ValuesIn(make_arima_inputs));

} // end namespace Random
} // end namespace MLCommon
93a7490f4d1d589f6732ee5e50f4f01577efce8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void initEta_formula_gpu( const float *coords, float *values, const double *time) { float x = coords[0]; float y = coords[1]; float t = *time; float val = -1.0f ;; values[0] += val; } // CUDA kernel function __global__ void op_cuda_initEta_formula( const float *__restrict arg0, float *arg1, const double *arg2, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call initEta_formula_gpu(arg0+n*2, arg1+n*4, arg2); } } //host stub function void op_par_loop_initEta_formula(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2){ double*arg2h = (double *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(5); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[5].name = name; OP_kernels[5].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: initEta_formula"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(double)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OP_consts_h + consts_bytes; arg2.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((double *)arg2.data)[d] = arg2h[d]; } consts_bytes += ROUND_UP(1*sizeof(double)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_5 int nthread = OP_BLOCK_SIZE_5; #else int nthread = OP_block_size; #endif int nblocks = 200; hipLaunchKernelGGL(( op_cuda_initEta_formula), dim3(nblocks),dim3(nthread), 0, 0, (float *) arg0.data_d, (float *) arg1.data_d, (double *) arg2.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[5].time += wall_t2 - wall_t1; OP_kernels[5].transfer += (float)set->size * arg0.size; OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f; }
93a7490f4d1d589f6732ee5e50f4f01577efce8b.cu
// // auto-generated by op2.py // //user function __device__ void initEta_formula_gpu( const float *coords, float *values, const double *time) { float x = coords[0]; float y = coords[1]; float t = *time; float val = -1.0f ;; values[0] += val; } // CUDA kernel function __global__ void op_cuda_initEta_formula( const float *__restrict arg0, float *arg1, const double *arg2, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call initEta_formula_gpu(arg0+n*2, arg1+n*4, arg2); } } //host stub function void op_par_loop_initEta_formula(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2){ double*arg2h = (double *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(5); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[5].name = name; OP_kernels[5].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: initEta_formula"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(double)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OP_consts_h + consts_bytes; arg2.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((double *)arg2.data)[d] = arg2h[d]; } consts_bytes += ROUND_UP(1*sizeof(double)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_5 int nthread = OP_BLOCK_SIZE_5; #else int nthread = OP_block_size; #endif int nblocks = 200; op_cuda_initEta_formula<<<nblocks,nthread>>>( (float *) arg0.data_d, (float *) arg1.data_d, (double *) arg2.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[5].time += wall_t2 - wall_t1; OP_kernels[5].transfer += (float)set->size * arg0.size; OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f; }
44f973f2750fa71d5426310f6ab73f2f1cd07e99.hip
// !!! This is a file automatically generated by hipify!!!
#include "SamplingKernel.cuh"

void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc) {
    int blockCounter = 0;
    int iterWT = (argWTDen.numOfWordD - 1) / GridDim + 1;
    float Perplexity = 0.0;
    srand(time(NULL));
    hiprandState_t* randState;
    hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim);
    initRandState << <GridDim, BlockDim >> >(randState);
    for (int i = 0; i < iterWT; i++) {
        hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice);
        LDAKernelTrainD << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, argWTDen.numOfWordD);
        H_ERR(hipDeviceSynchronize());
        blockCounter++;
    }
    H_ERR(hipDeviceSynchronize());
}

//(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, hiprandState_t *randState, double *WTHeadDense, int numOfWordD);

void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc) {
    int blockCounter = 0;
    int iterWT = (argWT.numOfWordS - 1) / GridDim + 1;
    float Perplexity = 0.0;
    int numOfWordD = argWT.wordLength- argWT.numOfWordS;
    srand(time(NULL));
    hiprandState_t* randState;
    hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim);
    initRandState << <GridDim, BlockDim >> >(randState);
    for (int i = 0; i < iterWT; i++) {
        hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice);
        LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS);
        H_ERR(hipDeviceSynchronize());
        blockCounter++;
    }
    LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]);
    H_ERR(hipDeviceSynchronize());
}
44f973f2750fa71d5426310f6ab73f2f1cd07e99.cu
#include "SamplingKernel.cuh"

void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc) {
    int blockCounter = 0;
    int iterWT = (argWTDen.numOfWordD - 1) / GridDim + 1;
    float Perplexity = 0.0;
    srand(time(NULL));
    curandState* randState;
    cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim);
    initRandState << <GridDim, BlockDim >> >(randState);
    for (int i = 0; i < iterWT; i++) {
        cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice);
        LDAKernelTrainD << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, argWTDen.numOfWordD);
        H_ERR(cudaDeviceSynchronize());
        blockCounter++;
    }
    H_ERR(cudaDeviceSynchronize());
}

//(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, curandState *randState, double *WTHeadDense, int numOfWordD);

void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc) {
    int blockCounter = 0;
    int iterWT = (argWT.numOfWordS - 1) / GridDim + 1;
    float Perplexity = 0.0;
    int numOfWordD = argWT.wordLength- argWT.numOfWordS;
    srand(time(NULL));
    curandState* randState;
    cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim);
    initRandState << <GridDim, BlockDim >> >(randState);
    for (int i = 0; i < iterWT; i++) {
        cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice);
        LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS);
        H_ERR(cudaDeviceSynchronize());
        blockCounter++;
    }
    LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]);
    H_ERR(cudaDeviceSynchronize());
}
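Both SampleKernelD and SampleKernel above allocate one RNG state per launched thread (GridDim*BlockDim states) and hand the array to initRandState, which is declared in SamplingKernel.cuh and not shown in this listing. A minimal sketch of what such an initializer and a consuming kernel conventionally look like follows; the kernel bodies and the fixed seed are assumptions made for illustration, not the project's actual implementation.

#include <cuda_runtime.h>
#include <curand_kernel.h>

// Hypothetical stand-in for the project's initRandState: one curandState per thread.
__global__ void initRandState(curandState *state) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    // seed, subsequence, offset, state; using the thread id as the subsequence
    // gives each thread an independent stream (the fixed seed is an assumption).
    curand_init(1234ULL, id, 0, &state[id]);
}

// How a sampling kernel would then draw from its per-thread state.
__global__ void drawUniform(curandState *state, float *out, int n) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < n) {
        curandState local = state[id];      // copy state into registers
        out[id] = curand_uniform(&local);   // uniform float in (0, 1]
        state[id] = local;                  // persist the advanced state
    }
}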
ee689b55412f3980e243d63e59a15356b77ae9e8.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } // call recursively return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for 
this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // 
unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int 
main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighboredLess), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), hipMemcpyDeviceToHost)); 
gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling4), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceCompleteUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: hipLaunchKernelGGL(( reduceCompleteUnroll<1024>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 512: hipLaunchKernelGGL(( reduceCompleteUnroll<512>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceCompleteUnroll<256>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceCompleteUnroll<128>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceCompleteUnroll<64>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; } CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * 
sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(hipFree(d_idata)); CHECK(hipFree(d_odata)); // reset device CHECK(hipDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
ee689b55412f3980e243d63e59a15356b77ae9e8.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } // call recursively return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = 
idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; 
int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; 
cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, 
block.x); // kernel 5: reduceUnrolling4 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 512: reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 256: reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 128: reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 64: reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; } CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return 
EXIT_SUCCESS; }
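The reduction program above depends on a CHECK error-checking macro and a seconds() wall-clock helper from ../common/common.h, which this listing does not include. The sketch below shows what that header plausibly provides (an assumption, given only so the calls above are self-explanatory): CHECK aborts on a non-success cudaError_t, and seconds() wraps gettimeofday.

#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#include <cuda_runtime.h>

// Abort with file/line and the runtime's error string when a CUDA call fails.
#define CHECK(call)                                                            \
{                                                                              \
    const cudaError_t error = (call);                                          \
    if (error != cudaSuccess)                                                  \
    {                                                                          \
        fprintf(stderr, "Error: %s:%d, code: %d, reason: %s\n",                \
                __FILE__, __LINE__, error, cudaGetErrorString(error));         \
        exit(1);                                                               \
    }                                                                          \
}

// Host wall-clock time in seconds, used to time each kernel above.
inline double seconds()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return (double)tp.tv_sec + (double)tp.tv_usec * 1.e-6;
}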
645113129e89277d1715b393b4077b23266db586.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <fstream> #include <string> #include <stdlib.h> #include <vector> #include <sstream> #include "roctracer/roctx.h" #include <hip/hip_runtime.h> #include "f_eval.cuh" using namespace std; // x - m-dimensional point // n - total number of points __global__ void jacobian(double *deviceInput, double *deviceResult, const int m, const int n, const double h, const double epsilon) { // number of blocks requried to process each point int blocksPerPoint = (m + 1023) / 1024; // idx in global deviceInput to start copying data from into shared memory int startBlockIdx = (blockIdx.x / blocksPerPoint) * m; // idx in global deviceInput till where to copy data into shared memory int endBlockIdx = startBlockIdx + m; // we have to copy from deviceInput[startBlockIdx] to deviceInput[endBlockIdx]. // int elementsToCopy = m; // int threadsPerBlock = blockDim.x; int elementsToCopyPerThread = (m + 1023) / 1024; int threadStartCopyIdx = startBlockIdx + (threadIdx.x * elementsToCopyPerThread); int threadEndCopyIdx = threadStartCopyIdx + elementsToCopyPerThread; int i = threadStartCopyIdx; int j = (threadIdx.x * elementsToCopyPerThread); extern __shared__ double sharedX[]; while(i < threadEndCopyIdx && i < endBlockIdx) { sharedX[j] = deviceInput[i]; i++; j++; } __syncthreads(); // if (m <= threadsPerBlock) { // // bring in utmost one thing // } else { // // bring in more than one thing // } // int startIdx = threadIdx.x * nEachThread; // int endIdx = startIdx + nEachThread; // for(int i = startIdx; i < endIdx; i++) { // sharedX[i] = x[i]; // } // TODO: fix this - malloc() returning NULL pointer // if(localX == 0) { // printf("You're screwed!\n"); // } // where to do the xi + h and xi - h? 
-- can't touch sharedX int currentThread = ((blockIdx.x % blocksPerPoint) * blockDim.x) + threadIdx.x; if(currentThread < m) { double *localX = (double*) malloc (m * sizeof(double)); for(int i = 0; i < m; i++) localX[i] = sharedX[i]; localX[currentThread] += h; double val1 = f_eval(localX, m); localX[currentThread] -= 2*h; double val2 = f_eval(localX, m); double result = (val1 - val2) / (2*epsilon); deviceResult[startBlockIdx + currentThread] = result; free(localX); } } int main(int argc, char* argv[]) { size_t freeDeviceMemory, totalDeviceMemory; hipMemGetInfo(&freeDeviceMemory, &totalDeviceMemory); // currently allocating half of free device memory to the heap hipDeviceSetLimit(hipLimitMallocHeapSize, freeDeviceMemory / 2); string ipFile(argv[1]); string opFile(argv[2]); double epsilon = atof(argv[3]); // Setting up timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); hipEventRecord(startEvent, 0); const double h = 1e-2; int m, n; string line; ifstream ifile; ifile.open(ipFile); getline(ifile, line); n = stoi(line); getline(ifile, line); m = stoi(line); int nElements = m * n; double *hostInput = new double[m * n]; double *hostResult = new double[m * n]; for (int i = 0; getline(ifile, line);) { string number; stringstream s(line); while(getline(s, number, ',')) { hostInput[i] = stod(number); i++; } } ifile.close(); // for(int i = 0; i < nElements; i++) // cout << hostInput[i] << " "; double *deviceInput, *deviceResult; hipError_t error; error = hipMalloc((void**) &deviceInput, nElements * sizeof(double)); if (error != hipSuccess) { printf("hipMalloc returned error code %d: %s, line(%d)\n", error, hipGetErrorString(error), __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void**) &deviceResult, nElements * sizeof(double)); if (error != hipSuccess) { printf("hipMalloc returned error code %d: %s, line(%d)\n", error, hipGetErrorString(error), __LINE__); exit(EXIT_FAILURE); } // copy stuff hipMemcpy(deviceInput, hostInput, nElements * sizeof(double), hipMemcpyHostToDevice); int blocksPerPoint = (m + 1023) / 1024; int totalBlocks = n * blocksPerPoint; int threadsPerBlock = min(m, 1024); // call kernel hipLaunchKernelGGL(( jacobian), dim3(totalBlocks), dim3(threadsPerBlock), 48000, 0, deviceInput, deviceResult, m, n, h, epsilon); hipDeviceSynchronize(); // copy back hipMemcpy(hostResult, deviceResult, nElements * sizeof(double), hipMemcpyDeviceToHost); // fix kernel // write to file ofstream ofile; ofile.open(opFile); for(int i = 0; i < n; i++) { for(int j = 0; j < m; j++) { if(j != m-1) ofile << hostResult[i*m + j] << ","; else ofile << hostResult[i*m + j]; } ofile << endl; } hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); ofile.close(); float elapsedTime; hipEventElapsedTime(&elapsedTime, startEvent, stopEvent); // Create a new file to log the execution time ofstream ologfile; ologfile.open("cudaSharedLog", ios_base::app); ologfile << left << setw(10) << n << "\t" << left << setw(10) << m << "\t" << left << setw(10) << setprecision(10) << elapsedTime << endl; ologfile.close(); hipFree(deviceInput); hipFree(deviceResult); delete[] hostInput, hostResult; return 0; }
645113129e89277d1715b393b4077b23266db586.cu
#include <iostream> #include <iomanip> #include <fstream> #include <string> #include <stdlib.h> #include <vector> #include <sstream> #include "nvToolsExt.h" #include <cuda.h> #include "f_eval.cuh" using namespace std; // x - m-dimensional point // n - total number of points __global__ void jacobian(double *deviceInput, double *deviceResult, const int m, const int n, const double h, const double epsilon) { // number of blocks requried to process each point int blocksPerPoint = (m + 1023) / 1024; // idx in global deviceInput to start copying data from into shared memory int startBlockIdx = (blockIdx.x / blocksPerPoint) * m; // idx in global deviceInput till where to copy data into shared memory int endBlockIdx = startBlockIdx + m; // we have to copy from deviceInput[startBlockIdx] to deviceInput[endBlockIdx]. // int elementsToCopy = m; // int threadsPerBlock = blockDim.x; int elementsToCopyPerThread = (m + 1023) / 1024; int threadStartCopyIdx = startBlockIdx + (threadIdx.x * elementsToCopyPerThread); int threadEndCopyIdx = threadStartCopyIdx + elementsToCopyPerThread; int i = threadStartCopyIdx; int j = (threadIdx.x * elementsToCopyPerThread); extern __shared__ double sharedX[]; while(i < threadEndCopyIdx && i < endBlockIdx) { sharedX[j] = deviceInput[i]; i++; j++; } __syncthreads(); // if (m <= threadsPerBlock) { // // bring in utmost one thing // } else { // // bring in more than one thing // } // int startIdx = threadIdx.x * nEachThread; // int endIdx = startIdx + nEachThread; // for(int i = startIdx; i < endIdx; i++) { // sharedX[i] = x[i]; // } // TODO: fix this - malloc() returning NULL pointer // if(localX == 0) { // printf("You're screwed!\n"); // } // where to do the xi + h and xi - h? -- can't touch sharedX int currentThread = ((blockIdx.x % blocksPerPoint) * blockDim.x) + threadIdx.x; if(currentThread < m) { double *localX = (double*) malloc (m * sizeof(double)); for(int i = 0; i < m; i++) localX[i] = sharedX[i]; localX[currentThread] += h; double val1 = f_eval(localX, m); localX[currentThread] -= 2*h; double val2 = f_eval(localX, m); double result = (val1 - val2) / (2*epsilon); deviceResult[startBlockIdx + currentThread] = result; free(localX); } } int main(int argc, char* argv[]) { size_t freeDeviceMemory, totalDeviceMemory; cudaMemGetInfo(&freeDeviceMemory, &totalDeviceMemory); // currently allocating half of free device memory to the heap cudaDeviceSetLimit(cudaLimitMallocHeapSize, freeDeviceMemory / 2); string ipFile(argv[1]); string opFile(argv[2]); double epsilon = atof(argv[3]); // Setting up timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); cudaEventRecord(startEvent, 0); const double h = 1e-2; int m, n; string line; ifstream ifile; ifile.open(ipFile); getline(ifile, line); n = stoi(line); getline(ifile, line); m = stoi(line); int nElements = m * n; double *hostInput = new double[m * n]; double *hostResult = new double[m * n]; for (int i = 0; getline(ifile, line);) { string number; stringstream s(line); while(getline(s, number, ',')) { hostInput[i] = stod(number); i++; } } ifile.close(); // for(int i = 0; i < nElements; i++) // cout << hostInput[i] << " "; double *deviceInput, *deviceResult; cudaError_t error; error = cudaMalloc((void**) &deviceInput, nElements * sizeof(double)); if (error != cudaSuccess) { printf("cudaMalloc returned error code %d: %s, line(%d)\n", error, cudaGetErrorString(error), __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void**) &deviceResult, nElements * sizeof(double)); if (error != 
cudaSuccess) { printf("cudaMalloc returned error code %d: %s, line(%d)\n", error, cudaGetErrorString(error), __LINE__); exit(EXIT_FAILURE); } // copy stuff cudaMemcpy(deviceInput, hostInput, nElements * sizeof(double), cudaMemcpyHostToDevice); int blocksPerPoint = (m + 1023) / 1024; int totalBlocks = n * blocksPerPoint; int threadsPerBlock = min(m, 1024); // call kernel jacobian<<<totalBlocks, threadsPerBlock, 48000>>>(deviceInput, deviceResult, m, n, h, epsilon); cudaDeviceSynchronize(); // copy back cudaMemcpy(hostResult, deviceResult, nElements * sizeof(double), cudaMemcpyDeviceToHost); // fix kernel // write to file ofstream ofile; ofile.open(opFile); for(int i = 0; i < n; i++) { for(int j = 0; j < m; j++) { if(j != m-1) ofile << hostResult[i*m + j] << ","; else ofile << hostResult[i*m + j]; } ofile << endl; } cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); ofile.close(); float elapsedTime; cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent); // Create a new file to log the execution time ofstream ologfile; ologfile.open("cudaSharedLog", ios_base::app); ologfile << left << setw(10) << n << "\t" << left << setw(10) << m << "\t" << left << setw(10) << setprecision(10) << elapsedTime << endl; ologfile.close(); cudaFree(deviceInput); cudaFree(deviceResult); delete[] hostInput, hostResult; return 0; }
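The jacobian kernel above declares extern __shared__ double sharedX[] and is launched with a hard-coded 48000 bytes of dynamic shared memory, even though only m doubles are staged per block. The hypothetical sketch below shows the usual way to size that allocation from m via the third launch parameter; it is an illustration, not a change to the files above, and it assumes m * sizeof(double) stays within the device's per-block shared-memory limit.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel with the same extern __shared__ pattern as jacobian above.
__global__ void sharedDemo(const double *in, double *out, int m) {
    extern __shared__ double sharedX[];              // size fixed at launch time
    for (int i = threadIdx.x; i < m; i += blockDim.x)
        sharedX[i] = in[i];                          // stage one point into shared memory
    __syncthreads();
    if (threadIdx.x < m)
        out[threadIdx.x] = 2.0 * sharedX[threadIdx.x];
}

int main() {
    const int m = 512;                               // dimensionality of one point
    size_t shmemBytes = m * sizeof(double);          // instead of a fixed 48000 bytes
    double *d_in, *d_out;
    cudaMalloc(&d_in, m * sizeof(double));
    cudaMalloc(&d_out, m * sizeof(double));
    cudaMemset(d_in, 0, m * sizeof(double));
    int threads = (m < 1024) ? m : 1024;
    sharedDemo<<<1, threads, shmemBytes>>>(d_in, d_out, m);
    cudaDeviceSynchronize();
    printf("launched with %zu bytes of dynamic shared memory\n", shmemBytes);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}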
6e77b4bc34ef135b49cff0dd2dc899e1e3cd0524.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> #include "plugin.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL; } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fprop(PASS_TYPE passType) { // Pull model, prev layers triggers this function, and this function pulls the activations from the prev layers. _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? 
_outputs : _inputs[_actsTarget]; // If its output is shared from the input _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { // At the time of executing this code, the ActsGrad of this layer is already generated by the next layers. // All layers should share this function except the last layer. The last layer should override this function. if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { // Transpose, don't know why we need to transpose everytime, ain't they already setup at the beginning? v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); bpropCommon(v, passType); if (isGradProducer()) { // If it is gradient producer, then back prop // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); } } truncBwdActs(); // After back prop. The Activation and Activation gradient of this layer is useless and is removed by calling truncate. // Before calling truncate, it is first checked whether this layer owns the activation and gradient matrix, if not then does not delete. // Each time fprop is executed, the activation matrix is dynamically allocated using resize. And the gradient matrix is allocated when the backprop is executed. // After back prop, these are useless, thus is truncated to reduce memory usage. // Propergate the backprop to the previous layers. // If the previous layers havn't received enough backprop, this will do nothing. // only the last layer that backprop to the previous layer will have effect. if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { // At reset time we don't need to delete the matrices, because they are already deleted during back prop. _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? 
new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { // Only weight layers need to be updated by the gradients, however, the layers on top of the weight layers need to back prop the gradients to the weight layer. // Thus those layers are also consumers indirectly. if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0, passType); // if scaleTargets > 0 then the gradient will be added instead of assigned. } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs(), passType); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer // Shares weights with another weight matrix in the same layer. // This constructor does not create intrisic data, but shares the data from the other matrix. _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { // shares weights from another layer. 
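            // (The shared weights are weight matrix `matrixIdx` of layer `srcLayerIdx`; this layer supplies its own learning rate epsW[i] for them.)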
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { // For all the layers based on weightlayer, there are weights and biases. // Thus it is common for all the layers to do backpropergation on the biases and weights. if (_biases->getEps() > 0) { bpropBiases(v, passType); } // There is one weight matrix for each of the inputs from the previous layer. // So iterate all of them. for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } Weights& WeightLayer::getWeights(int idx) { // get the weights of the idx th input. return _weights[idx]; } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) {// At the first run, the scaleTarget = 0, add the bias. The bias will be added only once. getActs().addVector(_biases->getW()); } } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { // Is the numCases the number of samples ? // Seems possible. If so, how does the other layers whose output for a single sample is a matrix pass the matrix of multiple samples? // Are they all unfolded to a vector? int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { // Judging from this function and the above one, each row of the matrix v is a sample. // Which means that it is quite possible that all the inputs and outputs are unfolded version of the activations. int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // Why scale the increase by the Weight momentem? Does it mean that weightsinc is never added to the weight before the current batch of training finishes? _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { // Initialize the variables supplied. _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... _filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice); cutilCheckMsg("hipMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 
1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? 
_actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0 ) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], 
getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No dava given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if(_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { 
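    // Undo of the bed-of-nails subsampling done in fpropActs: the incoming gradient is routed back to the strided input positions that were sampled on the forward pass.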
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() 
{ Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); _k = pyDictGetFloat(paramsDict, "k"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked, _k); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (layers.find(type) != layers.end()) { return *(CostLayer*)layers[type](convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& 
labels = _prev[0]->getActs();
    NVMatrix& probs = _prev[1]->getActs();
    NVMatrix& target = _prev[1]->getActsGrad();
    // Numerical stability optimization: if the layer below me is a softmax layer, let it handle
    // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
    bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
    if (doWork) {
        computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
    }
}

/*
 * =====================
 * SumOfSquaresCostLayer
 * =====================
 */
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}

void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
    _inputs[0]->apply(NVMatrixOps::Square(), getActs());
    _costv.clear();
    // Note: fpropActs squares the input into getActs() while bpropActs uses the raw input directly; the 0.5 factor
    // keeps them consistent, since d(0.5*x^2)/dx = x, which is exactly what bpropActs propagates (scaled by -_coeff).
    _costv.push_back(0.5*getActs().sum());
}

void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
    _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -_coeff);
}
6e77b4bc34ef135b49cff0dd2dc899e1e3cd0524.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> #include "plugin.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL; } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fprop(PASS_TYPE passType) { // Pull model, prev layers triggers this function, and this function pulls the activations from the prev layers. _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? 
_outputs : _inputs[_actsTarget]; // If its output is shared from the input _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { // At the time of executing this code, the ActsGrad of this layer is already generated by the next layers. // All layers should share this function except the last layer. The last layer should override this function. if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { // Transpose, don't know why we need to transpose everytime, ain't they already setup at the beginning? v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); bpropCommon(v, passType); if (isGradProducer()) { // If it is gradient producer, then back prop // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); } } truncBwdActs(); // After back prop. The Activation and Activation gradient of this layer is useless and is removed by calling truncate. // Before calling truncate, it is first checked whether this layer owns the activation and gradient matrix, if not then does not delete. // Each time fprop is executed, the activation matrix is dynamically allocated using resize. And the gradient matrix is allocated when the backprop is executed. // After back prop, these are useless, thus is truncated to reduce memory usage. // Propergate the backprop to the previous layers. // If the previous layers havn't received enough backprop, this will do nothing. // only the last layer that backprop to the previous layer will have effect. if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { // At reset time we don't need to delete the matrices, because they are already deleted during back prop. _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? 
new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { // Only weight layers need to be updated by the gradients, however, the layers on top of the weight layers need to back prop the gradients to the weight layer. // Thus those layers are also consumers indirectly. if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0, passType); // if scaleTargets > 0 then the gradient will be added instead of assigned. } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs(), passType); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer // Shares weights with another weight matrix in the same layer. // This constructor does not create intrisic data, but shares the data from the other matrix. _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { // shares weights from another layer. 
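            // (The shared weights are weight matrix `matrixIdx` of layer `srcLayerIdx`; this layer supplies its own learning rate epsW[i] for them.)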
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { // For all the layers based on weightlayer, there are weights and biases. // Thus it is common for all the layers to do backpropergation on the biases and weights. if (_biases->getEps() > 0) { bpropBiases(v, passType); } // There is one weight matrix for each of the inputs from the previous layer. // So iterate all of them. for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } Weights& WeightLayer::getWeights(int idx) { // get the weights of the idx th input. return _weights[idx]; } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) {// At the first run, the scaleTarget = 0, add the bias. The bias will be added only once. getActs().addVector(_biases->getW()); } } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { // Is the numCases the number of samples ? // Seems possible. If so, how does the other layers whose output for a single sample is a matrix pass the matrix of multiple samples? // Are they all unfolded to a vector? int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { // Judging from this function and the above one, each row of the matrix v is a sample. // Which means that it is quite possible that all the inputs and outputs are unfolded version of the activations. int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // Why scale the increase by the Weight momentem? Does it mean that weightsinc is never added to the weight before the current batch of training finishes? _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { // Initialize the variables supplied. _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... _filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice); cutilCheckMsg("cudaMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 
1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? 
_actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 
1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0 ) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], 
getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No dava given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if(_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { 
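    // Undo of the bed-of-nails subsampling done in fpropActs: the incoming gradient is routed back to the strided input positions that were sampled on the forward pass.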
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() 
{ Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); _k = pyDictGetFloat(paramsDict, "k"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked, _k); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (layers.find(type) != layers.end()) { return *(CostLayer*)layers[type](convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& 
labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax"; if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); _costv.push_back(0.5*getActs().sum()); // Keep fpropActs and bpropActs consistent: the cost here is 0.5*x^2, so its gradient is simply x } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -_coeff); }
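The softmax / cost.logreg special case above fuses the two gradients: for p = softmax(z) and the cross-entropy cost -log p_label, the gradient with respect to the logits is simply p_i - [i == label], so the near-zero probability p_label never has to be divided by. A minimal standalone sketch of that identity (plain C++; the function name and signature are illustrative, ignore the cost coefficient, and are not part of the convnet API):

#include <cstddef>
#include <vector>

// Gradient of -log(probs[label]) with respect to the softmax logits for one
// example; probs must already be softmax outputs that sum to 1.
std::vector<float> fusedLogregSoftmaxGrad(const std::vector<float>& probs,
                                          std::size_t label) {
    std::vector<float> grad(probs.size());
    for (std::size_t i = 0; i < probs.size(); ++i) {
        // No 1/probs[label] term appears, which is the point of the fusion.
        grad[i] = probs[i] - (i == label ? 1.0f : 0.0f);
    }
    return grad;
}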
GPUgaussMLEv2.hip
// !!! This is a file automatically generated by hipify!!! /*! * \file GPUgaussMLEv2.cu * \author Keith Lidke * \date January 10, 2010 * \brief This file contains all of the Cuda kernels. The helper functions * are defined in GPUgaussLib.cuh */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mex.h" #include "hip/hip_runtime.h" #include "definitions.h" #include "MatInvLib.h" #include "GPUgaussLib.cuh" #include "GPUgaussMLEv2.h" //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit_noshared(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) 
for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ __shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data memcpy(s_data+(sz*sz*tx), d_data+(sz*sz*bx*BlockSize+sz*sz*tx),sizeof(float)*sz*sz); //initial values kernel_CenterofMass2D(sz, &s_data[sz*sz*tx], &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, &s_data[sz*sz*tx], &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); 
df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigma(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS*NV_PS], Diag[NV_PS], Minv[NV_PS*NV_PS]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_PS; float dudt[NV_PS]; float d2udt2[NV_PS]; float NR_Numerator[NV_PS], NR_Denominator[NV_PS]; float theta[NV_PS]; float maxjump[NV_PS]={1e0f, 1e0f, 1e2f, 2e0f, 5e-1f}; float gamma[NV_PS]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_PS*NV_PS*sizeof(float)); memset(Minv,0,NV_PS*NV_PS*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_PS*sizeof(float)); memset(NR_Denominator,0,NV_PS*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[4]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], 0.5f); theta[4]=min(theta[4], sz/2.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= 
dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_z(const float *d_data, const float PSFSigma_x, const float Ax, const float Ay, const float Bx, const float By, const float gamma, const float d, const float PSFSigma_y, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma_x sigma of the point spread function on the x axis * \param Ax ??? * \param Ay ??? * \param Bx ??? * \param By ??? * \param gamma ??? * \param d ??? * \param PSFSigma_y sigma of the point spread function on the y axis * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[5*5], Diag[5], Minv[5*5]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=5; float dudt[5]; float d2udt2[5]; float NR_Numerator[5], NR_Denominator[5]; float theta[5]; float maxjump[5]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f}; float g[5]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma_x, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma_x*PSFSigma_y*sqrt(2.0f)); theta[4]=0; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay,Bx,By, gamma, d, &PSFx, &PSFy, dudt, d2udt2); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); 
theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay, Bx,By, gamma, d, &PSFx, &PSFy, dudt, NULL); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigmaxy(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[6*6], Diag[6], Minv[6*6]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=6; float dudt[6]; float d2udt2[6]; float NR_Numerator[6], NR_Denominator[6]; float theta[6]; float maxjump[6]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f,1e-1f}; float g[6]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f,1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; theta[5]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) 
cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], PSFSigma/10.0f); theta[5]=max(theta[5], PSFSigma/10.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; }
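Every fitting kernel above applies the same per-parameter update: a Newton-Raphson step NR_Numerator/NR_Denominator whose magnitude is clamped to maxjump, additionally damped by gamma during the first iterations, and followed by simple box constraints on intensity and background. A scalar sketch of that update rule (plain C++ with illustrative names, not part of the library):

#include <algorithm>

// One damped, clamped Newton-Raphson update for a single parameter.
float clampedNewtonUpdate(float theta, float numerator, float denominator,
                          float maxjump, float gamma, bool earlyIteration) {
    float step = numerator / denominator;
    step = std::min(std::max(step, -maxjump), maxjump); // limit the jump size
    if (earlyIteration) step *= gamma;                  // damp the first iterations
    return theta - step;
}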
GPUgaussMLEv2.cu
/*! * \file GPUgaussMLEv2.cu * \author Keith Lidke * \date January 10, 2010 * \brief This file contains all of the Cuda kernels. The helper functions * are defined in GPUgaussLib.cuh */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mex.h" #include "cuda_runtime.h" #include "definitions.h" #include "MatInvLib.h" #include "GPUgaussLib.cuh" #include "GPUgaussMLEv2.h" //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit_noshared(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); 
PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ __shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data memcpy(s_data+(sz*sz*tx), d_data+(sz*sz*bx*BlockSize+sz*sz*tx),sizeof(float)*sz*sz); //initial values kernel_CenterofMass2D(sz, &s_data[sz*sz*tx], &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, &s_data[sz*sz*tx], &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; 
NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigma(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS*NV_PS], Diag[NV_PS], Minv[NV_PS*NV_PS]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_PS; float dudt[NV_PS]; float d2udt2[NV_PS]; float NR_Numerator[NV_PS], NR_Denominator[NV_PS]; float theta[NV_PS]; float maxjump[NV_PS]={1e0f, 1e0f, 1e2f, 2e0f, 5e-1f}; float gamma[NV_PS]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_PS*NV_PS*sizeof(float)); memset(Minv,0,NV_PS*NV_PS*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_PS*sizeof(float)); memset(NR_Denominator,0,NV_PS*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[4]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], 0.5f); theta[4]=min(theta[4], sz/2.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= 
dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_z(const float *d_data, const float PSFSigma_x, const float Ax, const float Ay, const float Bx, const float By, const float gamma, const float d, const float PSFSigma_y, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma_x sigma of the point spread function on the x axis * \param Ax ??? * \param Ay ??? * \param Bx ??? * \param By ??? * \param gamma ??? * \param d ??? * \param PSFSigma_y sigma of the point spread function on the y axis * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[5*5], Diag[5], Minv[5*5]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=5; float dudt[5]; float d2udt2[5]; float NR_Numerator[5], NR_Denominator[5]; float theta[5]; float maxjump[5]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f}; float g[5]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma_x, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma_x*PSFSigma_y*sqrt(2.0f)); theta[4]=0; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay,Bx,By, gamma, d, &PSFx, &PSFy, dudt, d2udt2); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); 
theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay, Bx,By, gamma, d, &PSFx, &PSFy, dudt, NULL); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigmaxy(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[6*6], Diag[6], Minv[6*6]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=6; float dudt[6]; float d2udt2[6]; float NR_Numerator[6], NR_Denominator[6]; float theta[6]; float maxjump[6]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f,1e-1f}; float g[6]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f,1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; theta[5]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) 
cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], PSFSigma/10.0f); theta[5]=max(theta[5], PSFSigma/10.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; }
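A possible host-side driver for kernel_MLEFit_noshared above, inferred from the indexing inside the kernel: d_data holds Nfits contiguous sz*sz subregions, and the outputs are written parameter-major, i.e. d_Parameters[kk*Nfits + fit]. This is only a sketch: NV_P comes from definitions.h and is assumed here to be 4 (theta = {x, y, N, bg}), the header is assumed to declare the kernel, the block size of 64 is arbitrary, and error checking is omitted.

#include <cuda_runtime.h>
#include "GPUgaussMLEv2.h" // assumed to declare kernel_MLEFit_noshared

void fitSubregions(const float* h_data, int sz, int Nfits, float PSFSigma,
                   int iterations, float* h_Parameters, float* h_CRLBs,
                   float* h_LogLikelihood) {
    const int NV = 4;         // assumed value of NV_P for the {x,y,N,bg} model
    const int blockSize = 64; // arbitrary choice for this sketch
    float *d_data, *d_Parameters, *d_CRLBs, *d_LogLikelihood;
    cudaMalloc((void**)&d_data, sizeof(float) * sz * sz * Nfits);
    cudaMalloc((void**)&d_Parameters, sizeof(float) * NV * Nfits);
    cudaMalloc((void**)&d_CRLBs, sizeof(float) * NV * Nfits);
    cudaMalloc((void**)&d_LogLikelihood, sizeof(float) * Nfits);
    cudaMemcpy(d_data, h_data, sizeof(float) * sz * sz * Nfits, cudaMemcpyHostToDevice);

    // One thread per subregion, matching the kernel's (bx*BlockSize + tx) indexing.
    int numBlocks = (Nfits + blockSize - 1) / blockSize;
    kernel_MLEFit_noshared<<<numBlocks, blockSize>>>(d_data, PSFSigma, sz, iterations,
                                                     d_Parameters, d_CRLBs,
                                                     d_LogLikelihood, Nfits);
    cudaDeviceSynchronize();

    cudaMemcpy(h_Parameters, d_Parameters, sizeof(float) * NV * Nfits, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_CRLBs, d_CRLBs, sizeof(float) * NV * Nfits, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_LogLikelihood, d_LogLikelihood, sizeof(float) * Nfits, cudaMemcpyDeviceToHost);
    cudaFree(d_data); cudaFree(d_Parameters); cudaFree(d_CRLBs); cudaFree(d_LogLikelihood);
}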
6711a1b4f4546d2836ce75a1c524595ea2976d5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2016-2018, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include <fstream> #include <iostream> #include "util/system.h" #include "util/arguments.h" #include "util/io.h" #include "util/numpy_io.h" #include "cacc/util.h" #include "cacc/math.h" #include "eval/kernels.h" #include "stat/correlations.h" #include "mve/scene.h" #include "mve/image.h" struct Arguments { std::string scene; std::string image; std::string gt_mesh; std::string file; std::string recon_cloud; std::string obs_cloud; float max_distance; float target_recon; }; typedef unsigned int uint; typedef acc::BVHTree<uint, math::Vec3f> BVHTree; Arguments parse_args(int argc, char **argv) { util::Arguments args; args.set_exit_on_error(true); args.set_nonopt_minnum(4); args.set_nonopt_maxnum(4); args.set_usage("Usage: " + std::string(argv[0]) + " [OPTS] SCENE IMAGE GT_MESH FILE"); args.set_description("Evaluates Spearman's rank correlation between " "depth error and heuristic for multiple parameter sets."); args.add_option('r', "recon-cloud", true, "save cloud with predicted reconstructabilities"); args.add_option('o', "obs-cloud", true, "save cloud with number of observations reconstructabilities"); args.add_option('\0', "max-distance", true, "maximum distance to surface [80.0]"); args.parse(argc, argv); Arguments conf; conf.scene = args.get_nth_nonopt(0); conf.image = args.get_nth_nonopt(1); conf.gt_mesh = args.get_nth_nonopt(2); conf.file = args.get_nth_nonopt(3); conf.max_distance = 80.0f; for (util::ArgResult const* i = args.next_option(); i != 0; i = args.next_option()) { switch (i->opt->sopt) { case 'r': conf.recon_cloud = i->arg; break; case 'o': conf.obs_cloud = i->arg; break; case '\0': if (i->opt->lopt == "max-distance") { conf.max_distance = i->get_arg<float>(); } else { throw std::invalid_argument("Invalid option"); } break; default: throw std::invalid_argument("Invalid option"); } } return conf; } template <int N> inline void patch(mve::FloatImage::Ptr img, int x, int y, float (*ptr)[N][N]) { static_assert(N % 2 == 1, "Requires odd patch size"); constexpr int e = N / 2; for (int i = -e; i <= e; ++i) { for (int j = -e; j <= e; ++j) { (*ptr)[e + j][e + i] = img->at(x + j, y + i, 0); } } } int main(int argc, char **argv) { util::system::register_segfault_handler(); util::system::print_build_timestamp(argv[0]); Arguments args = parse_args(argc, argv); int device = cacc::select_cuda_device(3, 5); mve::Scene::Ptr scene; try { scene = mve::Scene::create(args.scene); } catch (std::exception& e) { std::cerr << "Could not open scene: " << e.what() << std::endl; std::exit(EXIT_FAILURE); } mve::TriangleMesh::Ptr mesh; try { mesh = mve::geom::load_ply_mesh(args.gt_mesh); } catch (std::exception& e) { std::cerr << "\tCould not load mesh: "<< e.what() << std::endl; std::exit(EXIT_FAILURE); } std::vector<math::Vec3f> const & vertices = mesh->get_vertices(); std::vector<uint> const & faces = mesh->get_faces(); std::cout << "Building BVH... " << std::flush; BVHTree::Ptr bvh_tree = BVHTree::create(faces, vertices); std::cout << "done." 
<< std::endl; std::vector<float> errors; std::vector<math::Vec3f> verts; std::vector<math::Vec3f> normals; std::vector<mve::View::Ptr> views = scene->get_views(); for (mve::View::Ptr & view : views) { if (view == nullptr) continue; if (!view->has_image(args.image, mve::IMAGE_TYPE_FLOAT)) { std::cerr << "Warning view " << view->get_name() << " has no image " << args.image << std::endl; continue; } mve::FloatImage::Ptr dmap = view->get_float_image(args.image); mve::CameraInfo const & camera = view->get_camera(); math::Vec3f origin; camera.fill_camera_pos(origin.begin()); math::Matrix3f invcalib; camera.fill_inverse_calibration(invcalib.begin(), dmap->width(), dmap->height()); math::Matrix3f c2w_rot; camera.fill_cam_to_world_rot(c2w_rot.begin()); /* Ignore border - issues with kernel approaches. */ int border = 0.01f * max(dmap->width(), dmap->height()); for (int y = border; y < dmap->height() - border; ++y) { for (int x = border; x < dmap->width() - border; ++x) { float depth = dmap->at(x, y, 0); BVHTree::Ray ray; ray.origin = origin; math::Vec3f v = invcalib * math::Vec3f ((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = c2w_rot.mult(v.normalized()).normalize(); ray.tmin = 0.0f; ray.tmax = std::numeric_limits<float>::infinity(); /* Ground truth depth? */ BVHTree::Hit hit; if (!bvh_tree->intersect(ray, &hit)) continue; verts.push_back(origin + (hit.t * ray.dir)); math::Vec3f v0 = vertices[faces[hit.idx * 3]]; math::Vec3f v1 = vertices[faces[hit.idx * 3 + 1]]; math::Vec3f v2 = vertices[faces[hit.idx * 3 + 2]]; normals.push_back((v2 - v0).cross(v1 - v0).normalize()); //float depths[25]; //patch(dmap, x, y, (float (*)[5][5])&depths); //if (std::any_of(depths, depths + 25, // [] (float d) { return d == 0.0f; })) { if (depth == 0) { errors.push_back(-1.0f); } else { errors.push_back(std::abs(depth - (hit.t * ray.dir).norm())); } } } } /* Construct cloud for heuristic evaluation on GPU. */ cacc::PointCloud<cacc::HOST>::Ptr cloud; cloud = cacc::PointCloud<cacc::HOST>::create(verts.size()); cacc::PointCloud<cacc::HOST>::Data data = cloud->cdata(); for (std::size_t i = 0; i < verts.size(); ++i) { data.vertices_ptr[i] = cacc::Vec3f(verts[i].begin()); data.normals_ptr[i] = cacc::Vec3f(normals[i].begin()); data.values_ptr[i] = 0.0f; data.qualities_ptr[i] = 1.0f; } cacc::PointCloud<cacc::DEVICE>::Ptr dcloud; dcloud = cacc::PointCloud<cacc::DEVICE>::create<cacc::HOST>(cloud); cacc::BVHTree<cacc::DEVICE>::Ptr dbvh_tree; dbvh_tree = cacc::BVHTree<cacc::DEVICE>::create<uint, math::Vec3f>(bvh_tree); uint num_verts = verts.size(); uint max_cameras = 32; cacc::VectorArray<cacc::Vec3f, cacc::DEVICE>::Ptr dobs_rays; dobs_rays = cacc::VectorArray<cacc::Vec3f, cacc::DEVICE>::create(num_verts, max_cameras); cacc::Array<float, cacc::DEVICE>::Ptr drecons; drecons = cacc::Array<float, cacc::DEVICE>::create(num_verts); drecons->null(); int width = 1920; int height = 1080; math::Matrix4f w2c; math::Matrix3f calib; math::Vec3f view_pos(0.0f); /* Populate view direction histograms. 
*/ { hipStream_t stream; hipStreamCreate(&stream); dim3 grid(cacc::divup(num_verts, KERNEL_BLOCK_SIZE)); dim3 block(KERNEL_BLOCK_SIZE); for (mve::View::Ptr const & view : scene->get_views()) { if (view == nullptr) continue; mve::CameraInfo cam = view->get_camera(); cam.fill_calibration(calib.begin(), width, height); cam.fill_world_to_cam(w2c.begin()); cam.fill_camera_pos(view_pos.begin()); hipLaunchKernelGGL(( update_observation_rays), dim3(grid), dim3(block), 0, stream, true, cacc::Vec3f(view_pos.begin()), args.max_distance, cacc::Mat4f(w2c.begin()), cacc::Mat3f(calib.begin()), width, height, dbvh_tree->accessor(), dcloud->cdata(), dobs_rays->cdata() ); } hipStreamDestroy(stream); CHECK(hipDeviceSynchronize()); } { dim3 grid(cacc::divup(num_verts, 2)); dim3 block(32, 2); hipLaunchKernelGGL(( process_observation_rays), dim3(grid), dim3(block), 0, 0, dobs_rays->cdata()); } std::vector<float> heuristics(verts.size()); std::vector<float> observations(verts.size()); float m_k = 8; float m_x0 =4; float t_k = 32; float t_x0 = 16; { configure_heuristic(m_k, m_x0, t_k, t_x0); CHECK(hipDeviceSynchronize()); dim3 grid(cacc::divup(num_verts, KERNEL_BLOCK_SIZE)); dim3 block(KERNEL_BLOCK_SIZE); hipLaunchKernelGGL(( evaluate_observation_rays), dim3(grid), dim3(block), 0, 0, dobs_rays->cdata(), drecons->cdata()); CHECK(hipDeviceSynchronize()); { cacc::Array<float, cacc::HOST> recons(*drecons); cacc::Array<float, cacc::HOST>::Data const & data = recons.cdata(); CHECK(hipDeviceSynchronize()); for (std::size_t k = 0; k < data.num_values; ++k) { heuristics[k] = data.data_ptr[k]; } } std::cout << stat::spearmans_rank_correlation(heuristics, errors) << std::endl; { cacc::VectorArray<cacc::Vec3f, cacc::HOST> obs_rays(*dobs_rays); cacc::VectorArray<cacc::Vec3f, cacc::HOST>::Data const & data = obs_rays.cdata(); CHECK(hipDeviceSynchronize()); for (std::size_t k = 0; k < data.num_cols; ++k) { observations[k] = data.num_rows_ptr[k]; } } save_numpy_file(heuristics, errors, observations, args.file); } if (!args.recon_cloud.empty() || !args.obs_cloud.empty()) { mve::TriangleMesh::Ptr mesh = mve::TriangleMesh::create(); mesh->get_vertices().assign(verts.begin(), verts.end()); mve::geom::SavePLYOptions opts; opts.write_vertex_values = true; if (!args.recon_cloud.empty()) { mesh->get_vertex_values().assign(heuristics.begin(), heuristics.end()); mve::geom::save_ply_mesh(mesh, args.recon_cloud, opts); } if (!args.obs_cloud.empty()) { mesh->get_vertex_values().assign(observations.begin(), observations.end()); mve::geom::save_ply_mesh(mesh, args.obs_cloud, opts); } } return EXIT_SUCCESS; }
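/*
 * Note on the HIP port above (descriptive only; the logic is identical to the
 * original CUDA version of this file): hipify rewrites every triple-chevron
 * kernel launch into hipLaunchKernelGGL(kernel, gridDim, blockDim,
 * sharedMemBytes, stream, args...) and maps cudaStream_t, cudaStreamCreate,
 * cudaStreamDestroy and cudaDeviceSynchronize onto their hip* equivalents.
 * Illustrative correspondence, taken from the launch in main() above:
 *
 *   update_observation_rays<<<grid, block, 0, stream>>>(args...)
 *     becomes
 *   hipLaunchKernelGGL((update_observation_rays), dim3(grid), dim3(block),
 *                      0, stream, args...)
 *
 * where "args..." stands for the unchanged kernel argument list.
 */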
6711a1b4f4546d2836ce75a1c524595ea2976d5f.cu
/* * Copyright (C) 2016-2018, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include <fstream> #include <iostream> #include "util/system.h" #include "util/arguments.h" #include "util/io.h" #include "util/numpy_io.h" #include "cacc/util.h" #include "cacc/math.h" #include "eval/kernels.h" #include "stat/correlations.h" #include "mve/scene.h" #include "mve/image.h" struct Arguments { std::string scene; std::string image; std::string gt_mesh; std::string file; std::string recon_cloud; std::string obs_cloud; float max_distance; float target_recon; }; typedef unsigned int uint; typedef acc::BVHTree<uint, math::Vec3f> BVHTree; Arguments parse_args(int argc, char **argv) { util::Arguments args; args.set_exit_on_error(true); args.set_nonopt_minnum(4); args.set_nonopt_maxnum(4); args.set_usage("Usage: " + std::string(argv[0]) + " [OPTS] SCENE IMAGE GT_MESH FILE"); args.set_description("Evaluates Spearman's rank correlation between " "depth error and heuristic for multiple parameter sets."); args.add_option('r', "recon-cloud", true, "save cloud with predicted reconstructabilities"); args.add_option('o', "obs-cloud", true, "save cloud with number of observations reconstructabilities"); args.add_option('\0', "max-distance", true, "maximum distance to surface [80.0]"); args.parse(argc, argv); Arguments conf; conf.scene = args.get_nth_nonopt(0); conf.image = args.get_nth_nonopt(1); conf.gt_mesh = args.get_nth_nonopt(2); conf.file = args.get_nth_nonopt(3); conf.max_distance = 80.0f; for (util::ArgResult const* i = args.next_option(); i != 0; i = args.next_option()) { switch (i->opt->sopt) { case 'r': conf.recon_cloud = i->arg; break; case 'o': conf.obs_cloud = i->arg; break; case '\0': if (i->opt->lopt == "max-distance") { conf.max_distance = i->get_arg<float>(); } else { throw std::invalid_argument("Invalid option"); } break; default: throw std::invalid_argument("Invalid option"); } } return conf; } template <int N> inline void patch(mve::FloatImage::Ptr img, int x, int y, float (*ptr)[N][N]) { static_assert(N % 2 == 1, "Requires odd patch size"); constexpr int e = N / 2; for (int i = -e; i <= e; ++i) { for (int j = -e; j <= e; ++j) { (*ptr)[e + j][e + i] = img->at(x + j, y + i, 0); } } } int main(int argc, char **argv) { util::system::register_segfault_handler(); util::system::print_build_timestamp(argv[0]); Arguments args = parse_args(argc, argv); int device = cacc::select_cuda_device(3, 5); mve::Scene::Ptr scene; try { scene = mve::Scene::create(args.scene); } catch (std::exception& e) { std::cerr << "Could not open scene: " << e.what() << std::endl; std::exit(EXIT_FAILURE); } mve::TriangleMesh::Ptr mesh; try { mesh = mve::geom::load_ply_mesh(args.gt_mesh); } catch (std::exception& e) { std::cerr << "\tCould not load mesh: "<< e.what() << std::endl; std::exit(EXIT_FAILURE); } std::vector<math::Vec3f> const & vertices = mesh->get_vertices(); std::vector<uint> const & faces = mesh->get_faces(); std::cout << "Building BVH... " << std::flush; BVHTree::Ptr bvh_tree = BVHTree::create(faces, vertices); std::cout << "done." 
<< std::endl; std::vector<float> errors; std::vector<math::Vec3f> verts; std::vector<math::Vec3f> normals; std::vector<mve::View::Ptr> views = scene->get_views(); for (mve::View::Ptr & view : views) { if (view == nullptr) continue; if (!view->has_image(args.image, mve::IMAGE_TYPE_FLOAT)) { std::cerr << "Warning view " << view->get_name() << " has no image " << args.image << std::endl; continue; } mve::FloatImage::Ptr dmap = view->get_float_image(args.image); mve::CameraInfo const & camera = view->get_camera(); math::Vec3f origin; camera.fill_camera_pos(origin.begin()); math::Matrix3f invcalib; camera.fill_inverse_calibration(invcalib.begin(), dmap->width(), dmap->height()); math::Matrix3f c2w_rot; camera.fill_cam_to_world_rot(c2w_rot.begin()); /* Ignore border - issues with kernel approaches. */ int border = 0.01f * max(dmap->width(), dmap->height()); for (int y = border; y < dmap->height() - border; ++y) { for (int x = border; x < dmap->width() - border; ++x) { float depth = dmap->at(x, y, 0); BVHTree::Ray ray; ray.origin = origin; math::Vec3f v = invcalib * math::Vec3f ((float)x + 0.5f, (float)y + 0.5f, 1.0f); ray.dir = c2w_rot.mult(v.normalized()).normalize(); ray.tmin = 0.0f; ray.tmax = std::numeric_limits<float>::infinity(); /* Ground truth depth? */ BVHTree::Hit hit; if (!bvh_tree->intersect(ray, &hit)) continue; verts.push_back(origin + (hit.t * ray.dir)); math::Vec3f v0 = vertices[faces[hit.idx * 3]]; math::Vec3f v1 = vertices[faces[hit.idx * 3 + 1]]; math::Vec3f v2 = vertices[faces[hit.idx * 3 + 2]]; normals.push_back((v2 - v0).cross(v1 - v0).normalize()); //float depths[25]; //patch(dmap, x, y, (float (*)[5][5])&depths); //if (std::any_of(depths, depths + 25, // [] (float d) { return d == 0.0f; })) { if (depth == 0) { errors.push_back(-1.0f); } else { errors.push_back(std::abs(depth - (hit.t * ray.dir).norm())); } } } } /* Construct cloud for heuristic evaluation on GPU. */ cacc::PointCloud<cacc::HOST>::Ptr cloud; cloud = cacc::PointCloud<cacc::HOST>::create(verts.size()); cacc::PointCloud<cacc::HOST>::Data data = cloud->cdata(); for (std::size_t i = 0; i < verts.size(); ++i) { data.vertices_ptr[i] = cacc::Vec3f(verts[i].begin()); data.normals_ptr[i] = cacc::Vec3f(normals[i].begin()); data.values_ptr[i] = 0.0f; data.qualities_ptr[i] = 1.0f; } cacc::PointCloud<cacc::DEVICE>::Ptr dcloud; dcloud = cacc::PointCloud<cacc::DEVICE>::create<cacc::HOST>(cloud); cacc::BVHTree<cacc::DEVICE>::Ptr dbvh_tree; dbvh_tree = cacc::BVHTree<cacc::DEVICE>::create<uint, math::Vec3f>(bvh_tree); uint num_verts = verts.size(); uint max_cameras = 32; cacc::VectorArray<cacc::Vec3f, cacc::DEVICE>::Ptr dobs_rays; dobs_rays = cacc::VectorArray<cacc::Vec3f, cacc::DEVICE>::create(num_verts, max_cameras); cacc::Array<float, cacc::DEVICE>::Ptr drecons; drecons = cacc::Array<float, cacc::DEVICE>::create(num_verts); drecons->null(); int width = 1920; int height = 1080; math::Matrix4f w2c; math::Matrix3f calib; math::Vec3f view_pos(0.0f); /* Populate view direction histograms. 
*/ { cudaStream_t stream; cudaStreamCreate(&stream); dim3 grid(cacc::divup(num_verts, KERNEL_BLOCK_SIZE)); dim3 block(KERNEL_BLOCK_SIZE); for (mve::View::Ptr const & view : scene->get_views()) { if (view == nullptr) continue; mve::CameraInfo cam = view->get_camera(); cam.fill_calibration(calib.begin(), width, height); cam.fill_world_to_cam(w2c.begin()); cam.fill_camera_pos(view_pos.begin()); update_observation_rays<<<grid, block, 0, stream>>>( true, cacc::Vec3f(view_pos.begin()), args.max_distance, cacc::Mat4f(w2c.begin()), cacc::Mat3f(calib.begin()), width, height, dbvh_tree->accessor(), dcloud->cdata(), dobs_rays->cdata() ); } cudaStreamDestroy(stream); CHECK(cudaDeviceSynchronize()); } { dim3 grid(cacc::divup(num_verts, 2)); dim3 block(32, 2); process_observation_rays<<<grid, block>>>( dobs_rays->cdata()); } std::vector<float> heuristics(verts.size()); std::vector<float> observations(verts.size()); float m_k = 8; float m_x0 =4; float t_k = 32; float t_x0 = 16; { configure_heuristic(m_k, m_x0, t_k, t_x0); CHECK(cudaDeviceSynchronize()); dim3 grid(cacc::divup(num_verts, KERNEL_BLOCK_SIZE)); dim3 block(KERNEL_BLOCK_SIZE); evaluate_observation_rays<<<grid, block>>>(dobs_rays->cdata(), drecons->cdata()); CHECK(cudaDeviceSynchronize()); { cacc::Array<float, cacc::HOST> recons(*drecons); cacc::Array<float, cacc::HOST>::Data const & data = recons.cdata(); CHECK(cudaDeviceSynchronize()); for (std::size_t k = 0; k < data.num_values; ++k) { heuristics[k] = data.data_ptr[k]; } } std::cout << stat::spearmans_rank_correlation(heuristics, errors) << std::endl; { cacc::VectorArray<cacc::Vec3f, cacc::HOST> obs_rays(*dobs_rays); cacc::VectorArray<cacc::Vec3f, cacc::HOST>::Data const & data = obs_rays.cdata(); CHECK(cudaDeviceSynchronize()); for (std::size_t k = 0; k < data.num_cols; ++k) { observations[k] = data.num_rows_ptr[k]; } } save_numpy_file(heuristics, errors, observations, args.file); } if (!args.recon_cloud.empty() || !args.obs_cloud.empty()) { mve::TriangleMesh::Ptr mesh = mve::TriangleMesh::create(); mesh->get_vertices().assign(verts.begin(), verts.end()); mve::geom::SavePLYOptions opts; opts.write_vertex_values = true; if (!args.recon_cloud.empty()) { mesh->get_vertex_values().assign(heuristics.begin(), heuristics.end()); mve::geom::save_ply_mesh(mesh, args.recon_cloud, opts); } if (!args.obs_cloud.empty()) { mesh->get_vertex_values().assign(observations.begin(), observations.end()); mve::geom::save_ply_mesh(mesh, args.obs_cloud, opts); } } return EXIT_SUCCESS; }
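/*
 * Background for the stat::spearmans_rank_correlation call above (a minimal
 * sketch; the exact tie handling inside stat/correlations.h is not shown
 * here): Spearman's rho is the Pearson correlation of the rank-transformed
 * samples. With distinct ranks it reduces to
 *
 *   rho = 1 - 6 * sum(d_i^2) / (n * (n^2 - 1)),  d_i = rank(x_i) - rank(y_i)
 *
 * A hypothetical host-side reference (not the library implementation) could
 * rank each vector and correlate the ranks:
 *
 *   std::vector<float> rank(std::vector<float> v) {
 *       std::vector<std::size_t> idx(v.size());
 *       std::iota(idx.begin(), idx.end(), 0);
 *       std::sort(idx.begin(), idx.end(),
 *                 [&](std::size_t a, std::size_t b) { return v[a] < v[b]; });
 *       std::vector<float> r(v.size());
 *       for (std::size_t i = 0; i < idx.size(); ++i) r[idx[i]] = (float)i;
 *       return r;
 *   }
 *   // rho = pearson(rank(heuristics), rank(errors))
 */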
2b66b9fbf3c33cd5dee1091b604966bf81a2747d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <MatKernel.hpp> #if __CUDA_ARCH__ > 200 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif __device__ float op_add(float a, float b) {return a+b;} __device__ float op_sub(float a, float b) {return a-b;} __device__ float op_mul(float a, float b) {return a*b;} __device__ float op_div(float a, float b) {return a/b;} __device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;} __device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;} __device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;} __device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;} __device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;} __device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;} __device__ float op_max(float a, float b) {return max(a,b);} __device__ float op_min(float a, float b) {return min(a,b);} __device__ float op_atan2(float a, float b) {return atan2f(a, b);} __device__ float op_pow(float a, float b) {return powf(a, b);} __device__ int iop_add(int a, int b) {return a+b;} __device__ int iop_sub(int a, int b) {return a-b;} __device__ int iop_mul(int a, int b) {return a*b;} __device__ int iop_div(int a, int b) {return a/b;} __device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;} __device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;} __device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;} __device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;} __device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;} __device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;} typedef float (*optype)(float,float); typedef int (*ioptype)(int,int); __device__ const optype operators[] = { op_add, op_sub, op_mul, op_div, op_gt, op_lt, op_eq, op_ge, op_le, op_ne, op_max, op_min, op_atan2, op_pow}; __device__ const ioptype ioperators[] = { iop_add, iop_sub, iop_mul, iop_div, iop_gt, iop_lt, iop_eq, iop_ge, iop_le, iop_ne}; __device__ float fn_abs(float a) {return abs(a);} __device__ float fn_exp(float a) {return expf(a);} __device__ float fn_log(float a) {return logf(a);} __device__ float fn_expm1(float a) {return expm1f(a);} __device__ float fn_sqrt(float a) {return sqrtf(a);} __device__ float fn_ln(float a) {return logf(a);} __device__ float fn_log10(float a) {return log10f(a);} __device__ float fn_log1p(float a) {return log1pf(a);} __device__ float fn_cos(float a) {return cosf(a);} __device__ float fn_sin(float a) {return sinf(a);} __device__ float fn_tan(float a) {return tanf(a);} __device__ float fn_cosh(float a) {return coshf(a);} __device__ float fn_sinh(float a) {return sinhf(a);} __device__ float fn_tanh(float a) {return tanhf(a);} __device__ float fn_acos(float a) {return acosf(a);} __device__ float fn_asin(float a) {return asinf(a);} __device__ float fn_atan(float a) {return atanf(a);} __device__ float fn_acosh(float a) {return acoshf(a);} __device__ float fn_asinh(float a) {return asinhf(a);} __device__ float fn_atanh(float a) {return atanhf(a);} __device__ float fn_erf(float a) {return erff(a);} __device__ float fn_erfinv(float a) {return erfinvf(a);} __device__ float fn_erfc(float a) {return erfcf(a);} __device__ float fn_erfcinv(float a) {return erfcinvf(a);} __device__ float fn_gammaln(float a) {return lgammaf(a);} __device__ float fn_gamma(float a) {return tgammaf(a);} __device__ float fn_ceil(float a) {return ceilf(a);} __device__ float fn_floor(float a) {return floorf(a);} 
__device__ float fn_round(float a) {return roundf(a);} __device__ float fn_trunc(float a) {return truncf(a);} __device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);} __device__ float fn_j0(float a) {return j0f(a);} __device__ float fn_j1(float a) {return j1f(a);} //__device__ float fn_jn(float a) {return jnf(a);} __device__ float fn_y0(float a) {return y0f(a);} __device__ float fn_y1(float a) {return y1f(a);} //__device__ float fn_yn(float a) {return ynf(a);} __device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;} __device__ float fn_atan2(float a, float b) {return atan2f(a, b);} __device__ float fn_pow(float a, float b) {return powf(a, b);} typedef float (*fntype)(float); __device__ const fntype fctns[35] = { fn_abs, fn_exp, fn_expm1, fn_sqrt, fn_ln, fn_log10, fn_log1p, fn_cos, fn_sin, fn_tan, fn_cosh, fn_sinh, fn_tanh, fn_acos, fn_asin, fn_atan, fn_acosh, fn_asinh, fn_atanh, fn_erf, fn_erfinv, fn_erfc, fn_erfcinv, fn_gammaln, fn_gamma, fn_ceil, fn_floor, fn_round, fn_trunc, fn_sign, fn_j0, fn_j1, fn_y0, fn_y1, fn_exppsi}; __device__ const optype fctns2[2] = { fn_atan2, fn_pow}; __global__ void __apply_gfun(float *A, float *B, int N, int opn) { fntype fn = fctns[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = fn(A[i]); } } void setsizes(int N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 1; while (nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < 1024) { nthreads = 2*nthreads; } else { nblocks = 2*nblocks; } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } int apply_gfun(float *A, float *B, int N, int opn) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); hipLaunchKernelGGL(( __apply_gfun), dim3(griddims),dim3(nthreads), 0, 0, A, B, N, opn); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __toFloat(int *A, float *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (float)(A[i]); } } __global__ void __toInt(float *A, int *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (int)(A[i]); } } int toFloat(int *A, float *B, int N) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); hipLaunchKernelGGL(( __toFloat), dim3(griddims),dim3(nthreads), 0, 0, A, B, N); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int toInt(float *A, int *B, int N) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); hipLaunchKernelGGL(( __toInt), dim3(griddims),dim3(nthreads), 0, 0, A, B, N); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) { int i, row, col; float v; int id = threadIdx.x + blockIdx.x * blockDim.x; for (i = id; i < nnz; i += blockDim.x * gridDim.x) { v = data[i]; row = ir[i]; col = ic[i]; od[row + col * nrows] = v; } } int full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) { int nblocks = min(32, 1+(nnz-1)/32); int nthreads = min(1+(nnz-1)/nblocks, 1024); hipLaunchKernelGGL(( __full), dim3(nblocks),dim3(nthreads), 0, 0, ir, ic, data, od, nrows, ncols, nnz); 
hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __initSeq(int *A, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { A[i] = i % nrows; } } int initSeq(int *A, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); hipLaunchKernelGGL(( __initSeq), dim3(griddims),dim3(nthreads), 0, 0, A, nrows, ncols); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) { optype fn = fctns2[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = fn(A[i], B[i]); } } int apply_gfun2(float *A, float *B, float *C, int N, int opn) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); hipLaunchKernelGGL(( __apply_gfun2), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __apply_full(float *A, float *B, float *C, int N, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i]); } } __global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i % nrows]); } } __global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i / nrows]); } } __global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i % nrows],B[i]); } } __global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i / nrows],B[i]); } } __global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); float val = B[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],val); } } __global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); float val = A[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(val,B[i]); } } __global__ void __set_val(float *A, float val, int length) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) { A[i] = val; } } int set_val(float *A, 
float val, int length) { int nthreads; dim3 griddims; setsizes(length, &griddims, &nthreads); hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, val, length); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int set_ival(float *A, int val, int length) { int nthreads; dim3 griddims; setsizes(length, &griddims, &nthreads); hipLaunchKernelGGL(( __set_val), dim3(griddims),dim3(nthreads), 0, 0, A, *((float *)&val), length); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int apply_binop(float *A, int Anrows, int Ancols, float *B, int Bnrows, int Bncols, float *C, int opn) { int N = max(Anrows, Bnrows)*max(Ancols, Bncols); int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); if (Anrows == Bnrows && Ancols == Bncols) { hipLaunchKernelGGL(( __apply_full), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn); } else if (Anrows == Bnrows && Bncols == 1) { hipLaunchKernelGGL(( __apply_right_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Ancols == Bncols && Bnrows == 1) { hipLaunchKernelGGL(( __apply_right_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Anrows == Bnrows && Ancols == 1) { hipLaunchKernelGGL(( __apply_left_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } else if (Ancols == Bncols && Anrows == 1) { hipLaunchKernelGGL(( __apply_left_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } else if (Bnrows == 1 && Bncols == 1) { hipLaunchKernelGGL(( __apply_right_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Anrows == 1 && Ancols == 1) { hipLaunchKernelGGL(( __apply_left_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i]); } } __global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i % nrows]); } } __global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i / nrows]); } } __global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i % nrows],B[i]); } } __global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i / nrows],B[i]); } } __global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + 
blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int val = B[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],val); } } __global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int val = A[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(val,B[i]); } } int apply_biniop(int *A, int Anrows, int Ancols, int *B, int Bnrows, int Bncols, int *C, int opn) { int N = max(Anrows, Bnrows)*max(Ancols, Bncols); int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); if (Anrows == Bnrows && Ancols == Bncols) { hipLaunchKernelGGL(( __apply_full_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn); } else if (Anrows == Bnrows && Bncols == 1) { hipLaunchKernelGGL(( __apply_right_col_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Ancols == Bncols && Bnrows == 1) { hipLaunchKernelGGL(( __apply_right_row_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Anrows == Bnrows && Ancols == 1) { hipLaunchKernelGGL(( __apply_left_col_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } else if (Ancols == Bncols && Anrows == 1) { hipLaunchKernelGGL(( __apply_left_row_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } else if (Bnrows == 1 && Bncols == 1) { hipLaunchKernelGGL(( __apply_right_val_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); } else if (Anrows == 1 && Ancols == 1) { hipLaunchKernelGGL(( __apply_left_val_int), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } // Implement B[I,J] = A // indexed copy: version with one block per column #define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2D##DFNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[IEXPR + icol * ldb] = A[i + iblock * lda]; \ } \ } \ } COPYTOINDS2DA(nn,I[i],J[iblock]) COPYTOINDS2DA(xn,i,J[iblock]) COPYTOINDS2DA(nx,I[i],iblock) COPYTOINDS2DA(xx,i,iblock) // Implement B[I,J] = A // indexed copy: version with one thread per element #define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2DB##DFNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \ } \ } COPYTOINDS2DB(nn,I[irow],J[icol]) COPYTOINDS2DB(xn,irow,J[icol]) COPYTOINDS2DB(nx,I[irow],icol) COPYTOINDS2DB(xx,irow,icol) // Implement B[I,J] = A int copyToInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = min(len, max(32, min(1024, nrows))); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { hipLaunchKernelGGL(( __copyToInds2Dxx), 
dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyToInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { hipLaunchKernelGGL(( __copyToInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyToInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { hipLaunchKernelGGL(( __copyToInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyToInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { hipLaunchKernelGGL(( __copyToInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyToInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } // Implement B = A[I,J] // indexed copy: version with one block per column #define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2D##FNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[i + iblock * ldb] = A[IEXPR + icol * lda]; \ } \ } \ } COPYFROMINDS2DA(nn,I[i],J[iblock]) COPYFROMINDS2DA(xn,i,J[iblock]) COPYFROMINDS2DA(nx,I[i],iblock) COPYFROMINDS2DA(xx,i,iblock) // Implement B = A[I,J] // indexed copy: version with one thread per element #define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2DB##FNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \ } \ } COPYFROMINDS2DB(nn,I[irow],J[icol]) COPYFROMINDS2DB(xn,irow,J[icol]) COPYFROMINDS2DB(nx,I[irow],icol) COPYFROMINDS2DB(xx,irow,icol) // Implement B = A[I,J] int copyFromInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = min(len, max(32, min(1024, nrows))); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { hipLaunchKernelGGL(( __copyFromInds2Dxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyFromInds2Dxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { hipLaunchKernelGGL(( __copyFromInds2Dnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyFromInds2Dnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { hipLaunchKernelGGL(( __copyFromInds2DBxx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyFromInds2DBxn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, 
I, nrows, J, ncols); } } else { if (J == NULL) { hipLaunchKernelGGL(( __copyFromInds2DBnx), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } else { hipLaunchKernelGGL(( __copyFromInds2DBnn), dim3(griddims),dim3(nthreads), 0, 0, A, lda, B, ldb, I, nrows, J, ncols); } } } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { float sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[i + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[i + nrows * Bic[j]], sum); sum = 0; } } } } __global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; float sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum); sum = 0; } } } int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) { hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) { dim3 threadDim(nthreadsx, nthreadsy, 1); hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { float aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; } atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]); } } } __global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; float aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[threadIdx.x + nrows * Bic[j]]; } atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]); } } int dsmultT(int 
nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); hipLaunchKernelGGL(( __dsmultTx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); hipLaunchKernelGGL(( __dsmultT), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Aic[i]], P[i]); } } __global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Air[i]], P[i]); } } int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) { int nthreads = min(128, nnz); int nblks = min(65536, max(1, (nnz-1) / 128)); if (n == 1) { hipLaunchKernelGGL(( __spsum1), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B); } else { hipLaunchKernelGGL(( __spsum2), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P); __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P); __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn); __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr); #define DDS_BLKY 32 #if __CUDA_ARCH__ > 200 __global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j = jstart; j < jend ; j++) { float sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } for (int i = 1; i < blockDim.x; i *= 2) { float tmp = __shfl_down(sum, i); if (threadIdx.x + i < blockDim.x) sum = sum + tmp; } if (threadIdx.x == 0) { atomicAdd(&P[j], sum); } } } __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) { __shared__ float merge[32]; int jstart = ((long long)blockIdx.x) * ncols / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int aoff, boff; float user, prod, sum, bsum; for (int j0 = jstart; j0 < jend ; j0++) { boff = nrows * j0; user = B[tid + boff]; for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) { aoff = nrows * Cir[j]; prod = A[tid + aoff] * user; sum = prod + __shfl_down(prod, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); __syncthreads(); if (threadIdx.x == threadIdx.y) { merge[threadIdx.x] = bsum; } __syncthreads(); if 
(threadIdx.y == 0) { sum = merge[threadIdx.x]; sum = sum + __shfl_down(sum, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); if (threadIdx.x == 0) { P[j] = sum; } } } } } #else __global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { __shared__ float parts[32*DDS_BLKY]; int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j = jstart; j < jend ; j++) { float sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } parts[tid] = sum; for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) { __syncthreads(); if (i + tid < blockDim.x * blockDim.y) { parts[tid] = parts[tid] + parts[i + tid]; } } __syncthreads(); if (tid == 0) { P[j] = parts[0]; } __syncthreads(); } } __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {} #endif int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,nnz/128)); hipLaunchKernelGGL(( __dds), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, Cir, Cic, P); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) { dim3 blockDims(32, 32, 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,ncols/64)); hipLaunchKernelGGL(( __dds0), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, Cir, Cic, P); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #if __CUDA_ARCH__ > 200 __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) { optype op = operators[opn]; int basecol = threadIdx.y + blockDim.y * blockIdx.x; for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { float v = A[threadIdx.x + icol * nrows]; for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) { v = op(v, A[i + icol * nrows]); } for (int i = 1; i < blockDim.x; i *= 2) { v = op(v, __shfl_down(v, i)); } if (threadIdx.x == 0) { B[icol] = v; } } } #else __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) { __shared__ float parts[32][33]; optype op = operators[opn]; for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { float v = A[threadIdx.x + icol * nrows]; for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) { v = op(v, A[irow + icol * nrows]); } parts[threadIdx.x][threadIdx.y] = v; for (int i = 1; i < blockDim.x; i *= 2) { if (i + threadIdx.x < blockDim.x) { parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); } } if (threadIdx.x == 0) { B[icol] = parts[0][threadIdx.y]; } __syncthreads(); } } #endif int reduce1op(int nrows, int ncols, float *A, float *B, int opn) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); hipLaunchKernelGGL(( __reduce1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #if 
__CUDA_ARCH__ > 200 __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { optype opbf = operators[opb]; optype oprf = operators[opr]; int basecol = threadIdx.y + blockDim.y * blockIdx.x; for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { float v = 0; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows])); } for (int i = 1; i < blockDim.x; i *= 2) { v = oprf(v, __shfl_down(v, i)); } if (threadIdx.x == 0) { C[icol] = v; } } } #else __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { __shared__ float parts[32][33]; optype opbf = operators[opb]; optype oprf = operators[opr]; for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { float v = 0; for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) { v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); } parts[threadIdx.x][threadIdx.y] = v; for (int i = 1; i < blockDim.x; i *= 2) { if (i + threadIdx.x < blockDim.x) { parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); } } if (threadIdx.x == 0) { C[icol] = parts[0][threadIdx.y]; } __syncthreads(); } } #endif int reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); hipLaunchKernelGGL(( __reducebin1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #define BLOCKDIM 32 __global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) { int gridx = min(32, 1+(nrows-1)/256); int gridy = min(32, 1+(ncols-1)/256); const dim3 griddims(gridx, gridy, 1); const dim3 blockdims(BLOCKDIM,16,1); hipError_t err; int dev = -1; hipGetDevice(&dev); hipLaunchKernelGGL(( __transpose), dim3(griddims),dim3(blockdims), 0, 0, in, instride, out, outstride, nrows, ncols); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols); return err; } return 0; } __global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) { __shared__ float parts[32][33]; optype op = operators[opn]; int baserow = threadIdx.x + blockDim.x * blockIdx.x; for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { float v = A[irow + threadIdx.y * nrows]; for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += 
blockDim.y) { v = op(v, A[irow + icol * nrows]); } parts[threadIdx.x][threadIdx.y] = v; __syncthreads(); float newv = 0; for (int i = 1; i < blockDim.y; i *= 2) { if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; __syncthreads(); if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv); __syncthreads(); } if (threadIdx.y == 0) { B[irow] = parts[threadIdx.x][0]; } __syncthreads(); } } int reduce2op(int nrows, int ncols, float *A, float *B, int opn) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); hipLaunchKernelGGL(( __reduce2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, opn); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { __shared__ float parts[32][33]; optype opbf = operators[opb]; optype oprf = operators[opr]; int baserow = threadIdx.x + blockDim.x * blockIdx.x; for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]); for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); } parts[threadIdx.x][threadIdx.y] = v; __syncthreads(); float newv = 0; for (int i = 1; i < blockDim.y; i *= 2) { if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; __syncthreads(); if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv); __syncthreads(); } if (threadIdx.y == 0) { C[irow] = parts[threadIdx.x][0]; } __syncthreads(); } } int reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); hipLaunchKernelGGL(( __reducebin2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __embedmat2d(float *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { float v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } b[i] = (long long)vi + (((long long)(i/nrows+1))<<32); } } __global__ void __embedmat(float *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { float v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } c[i] = (long long)vi + (((long long)b[i])<<32); } } int embedmat2d(float *a, long long *b, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); hipLaunchKernelGGL(( __embedmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int embedmat(float *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); hipLaunchKernelGGL(( 
__embedmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __extractmat2d(float *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&b[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((float *)&vi); } } __global__ void __extractmat(float *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&c[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((float *)&vi); b[i] = *(((int *)&c[i])+1); } } int extractmat2d(float *a, long long *b, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); hipLaunchKernelGGL(( __extractmat2d), dim3(griddims),dim3(nthreads), 0, 0, a, b, nrows, ncols); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int extractmat(float *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); hipLaunchKernelGGL(( __extractmat), dim3(griddims),dim3(nthreads), 0, 0, a, b, c, n); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/reverse.h> int fsort2d(float *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) { for (int i = 0; i < ncols; i++) { thrust::device_ptr<float> keys(pkeys+i*nrows); thrust::device_ptr<unsigned int> vals(pvals+i*nrows); if (asc > 0) { thrust::sort_by_key(keys, keys + nrows, vals); } else { thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<float>()); } } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int isort(int *pkeys, int N, int asc) { thrust::device_ptr<int> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<int>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int fsort(float *pkeys, int N, int asc) { thrust::device_ptr<float> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<int>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int isortk(int *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<int> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); } else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<int>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int fsorts(float *pkeys, unsigned int *pvals, int *jc, int m, int asc) { for (int i = 0; i < m; i++) { thrust::device_ptr<float> keys(pkeys + jc[i]); thrust::device_ptr<unsigned int> vals(pvals + jc[i]); int b = jc[i+1] - jc[i]; if (asc > 0) { thrust::sort_by_key(keys, keys + b, vals); } else { thrust::sort_by_key(keys, keys + b, vals, thrust::greater<float>()); } } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int dsortk(double *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<double> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); 
} else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<double>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int lsortk(long long *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<long long> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); } else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<long long>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int lsort(long long *pkeys, int N, int asc) { thrust::device_ptr<long long> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<long long>()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } typedef struct lll { int x; int y; int z; int w; } lllint; struct cmp_lllint_key_asc { __host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const { if (lhs.x < rhs.x) return true; if (lhs.x > rhs.x) return false; if (lhs.y < rhs.y) return true; if (lhs.y > rhs.y) return false; if (lhs.z < rhs.z) return true; if (lhs.z > rhs.z) return false; return (lhs.w < rhs.w); } }; struct cmp_lllint_key_desc { __host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const { if (lhs.x > rhs.x) return true; if (lhs.x < rhs.x) return false; if (lhs.y > rhs.y) return true; if (lhs.y < rhs.y) return false; if (lhs.z > rhs.z) return true; if (lhs.z < rhs.z) return false; return (lhs.w > rhs.w); } }; int i4sort(int *pkeys0, int N, int asc) { lllint *pkeys = (lllint *)pkeys0; thrust::device_ptr<lllint> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N, cmp_lllint_key_asc()); } else { thrust::sort(keys, keys + N, cmp_lllint_key_desc()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } typedef struct i3 { int x; int y; int z; } i3struct; struct cmp_i3struct_key_asc { __host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const { if (lhs.x < rhs.x) return true; if (lhs.x > rhs.x) return false; if (lhs.y < rhs.y) return true; if (lhs.y > rhs.y) return false; return (lhs.z < rhs.z); } }; struct cmp_i3struct_key_desc { __host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const { if (lhs.x > rhs.x) return true; if (lhs.x < rhs.x) return false; if (lhs.y > rhs.y) return true; if (lhs.y < rhs.y) return false; return (lhs.z > rhs.z); } }; int i3sortk(int *pkeys0, unsigned int *pvals, int N, int asc) { i3struct *pkeys = (i3struct *)pkeys0; thrust::device_ptr<i3struct> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_asc()); } else { thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_desc()); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } // This path may break. If so look for radixsort_api.h in /usr/local/cuda/include // and fix the path below. 
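/*
 * Dependency note for the routines below (fsortsizex, lsortsizex, fsort2dx,
 * lsortx): RadixSortingEnactor and RadixSortStorage come from the
 * back40computing (b40c) radix sort that older Thrust releases bundled as an
 * internal detail (the radixsort_api.h header referenced in the comment
 * above). Because it lives under thrust::system::cuda::detail it is not a
 * public API and appears to have been dropped from newer toolkits, so these
 * entry points only build against a toolkit that still ships that header (or
 * a locally vendored copy, with the include path fixed as noted above).
 */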
using namespace thrust::system::cuda::detail::detail::b40c_thrust; int fsortsizex(int N) { RadixSortingEnactor<float,unsigned int> sorter(N); return sorter.SpineElements(); } int lsortsizex(int N) { RadixSortingEnactor<long long,unsigned int> sorter(N); return sorter.SpineElements(); } int fsort2dx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int nrows, int ncols, int asc) { int i; hipError_t err; RadixSortingEnactor<float,unsigned int> sorter(nrows); RadixSortStorage<float,unsigned int> storage; storage.d_spine = ispine; storage.d_from_alt_storage = bflags; storage.using_alternate_storage = false; for (i = 0; i < ncols; i++) { storage.d_keys = pkeys+i*nrows; storage.d_values = pvals+i*nrows; storage.d_alt_keys = tkeys; storage.d_alt_values = tvals; if (asc == 0) { thrust::device_ptr<float> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+nrows); thrust::reverse(vals, vals+nrows); } hipDeviceSynchronize(); sorter.EnactSort(storage); hipDeviceSynchronize(); err = hipGetLastError(); if (err > 0) return err; if (asc == 0) { thrust::device_ptr<float> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+nrows); thrust::reverse(vals, vals+nrows); } hipDeviceSynchronize(); if (storage.d_keys == tkeys) { hipMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(float), hipMemcpyDeviceToDevice); } if (storage.d_values == tvals) { hipMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), hipMemcpyDeviceToDevice); } } return err; } int lsortx(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N, int asc) { RadixSortingEnactor<long long,unsigned int> sorter(N); RadixSortStorage<long long,unsigned int> storage; storage.d_keys = pkeys; storage.d_values = pvals; storage.d_alt_keys = tkeys; storage.d_alt_values = tvals; storage.d_spine = ispine; storage.d_from_alt_storage = bflags; if (asc == 0) { thrust::device_ptr<long long> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+N); thrust::reverse(vals, vals+N); } hipDeviceSynchronize(); sorter.EnactSort(storage); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (asc == 0) { thrust::device_ptr<long long> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+N); thrust::reverse(vals, vals+N); } return err; } __global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) { __shared__ float ss[32]; __shared__ unsigned int ibin[32]; __shared__ unsigned int ebin[32]; __shared__ unsigned int todo[32]; __shared__ float bins[64][33]; __shared__ unsigned int topush; int tid = threadIdx.x; ss[tid] = strata[tid]; ibin[tid] = 0; for (int i = 0; i < n; i += blockDim.x * gridDim.x) { int ii = i + tid + blockDim.x * blockIdx.x; if (tid == 0) topush = 0; if (ii < n) { float v = a[ii]; int j = 1; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 
2*j+1 : 2*j; j = j - 32; int k = atomicInc(&ibin[j], 256); bins[k][j] = v; if (k == 31) { k = atomicInc(&topush, 1024); todo[k] = j; } } if (ibin[tid] >= 32) { ebin[tid] = atomicAdd(&bi[tid], 32); ibin[tid] = ibin[tid] - 32; } for (int k = 0; k < topush; k++) { int j = todo[k]; b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j]; } } ebin[tid] = atomicAdd(&bi[tid], ibin[tid]); for (int j = 0; j < 32; j++) { if (tid < ibin[j]) { b[j*stride + ebin[j] + tid] = bins[tid][j]; } } } int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) { hipLaunchKernelGGL(( __stratify), dim3(40),dim3(32), 0, 0, strata, n, a, b, bi, stride); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #define SNDVALS 256 #define SNDGRPS 4 #define SNTHREADS 1024 #define SBIGBLK (4*1024) __global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ float ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { float v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } } int stratifycounts(float *strata, int n, float *a, unsigned int *bi) { const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1); const dim3 griddims(8,1,1); hipLaunchKernelGGL(( __stratifycounts), dim3(griddims),dim3(blockdims), 0, 0, strata, n, a, bi); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #define RNDVALS 256 #define RNTHREADS 256 #define RNDBITS 8 #define RBIGBLK (4*1024) __global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) { __shared__ unsigned int ic[RNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int tid = threadIdx.x; int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK); for (int i = istart; i < iend; i += RBIGBLK) { __syncthreads(); ic[threadIdx.x] = 0; __syncthreads(); for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) { float v = a[j]; unsigned char *cv = (unsigned char *)&v; atomicInc(&ic[cv[digit]], 65536*32767); } __syncthreads(); bi[bibase + threadIdx.x] = ic[threadIdx.x]; bibase += RNDVALS; } } int radixcounts(float *a, int n, int digit, unsigned int *bi) { const dim3 blockdims(RNTHREADS,1,1); const dim3 griddims(32,1,1); hipLaunchKernelGGL(( __radixcounts), dim3(griddims),dim3(blockdims), 0, 0, a, n, digit, bi); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #if __CUDA_ARCH__ > 200 #define GENDISTS(DFNAME,DFUNC) \ __global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \ int ldc, int d, 
int nrows, int ncols, float p) { \ int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \ int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \ float va, vb, vc; \ float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \ R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \ int xi = threadIdx.x + xblk; \ int yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ } \ yi = threadIdx.x + yblk; \ int nbr = (threadIdx.x + 1) % blockDim.x; \ for (int i = 0; i < d; i++) { \ va = (xi < nrows) ? A[xi + i * lda] : 0; \ vb = (yi < ncols) ? 
B[yi + i * ldb] : 0; \ vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \ vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \ vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \ vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \ vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \ vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \ vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \ vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \ vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \ vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \ vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \ vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \ vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \ vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \ vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \ vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \ } \ yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} 
yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \ } \ } GENDISTS(__l1dist,vc+=abs(va-vb)) GENDISTS(__l2dist,vc+=(va-vb)*(va-vb)) GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p)) GENDISTS(__linfdist,vc=max(vc,abs(va-vb))) GENDISTS(__msum,vc=max(vc,va+vb)) #else __global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Lidist not supported on arch <= 200\n"); } __global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, L2dist not supported on arch <= 200\n"); } __global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Minkowski distance not supported on arch <= 200\n"); } __global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Max-abs distance not supported on arch <= 200\n"); } __global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Max-sum multiply not supported on arch <= 200\n"); } #endif int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); // hipSetDevice(ithread); if (p == 0.0f) { hipLaunchKernelGGL(( __linfdist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 1.0f) { hipLaunchKernelGGL(( __l1dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 2.0f) { hipLaunchKernelGGL(( __l2dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else { hipLaunchKernelGGL(( __minkowskidist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int maxsumx(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); hipLaunchKernelGGL(( __msum), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, 0); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #if __CUDA_ARCH__ > 200 template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { __shared__ T tots[32]; int start, end, ij; int bid = blockIdx.y + blockIdx.z * blockDim.y; // column index T sum, tsum, tmp, ttot, ttot0; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { start = jc[ij] + bid * nrows; end = jc[ij+1] + bid * nrows; sum = 0; for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { tsum = in[i]; tmp = __shfl_up(tsum, 1); if (threadIdx.x >= 1) tsum += tmp; tmp = __shfl_up(tsum, 2); if (threadIdx.x >= 2) tsum += tmp; tmp = __shfl_up(tsum, 4); if (threadIdx.x >= 4) tsum += tmp; tmp = __shfl_up(tsum, 8); if (threadIdx.x >= 8) tsum += tmp; tmp = __shfl_up(tsum, 16); if 
(threadIdx.x >= 16) tsum += tmp; ttot = __shfl(tsum, min(end-start-1, 31)); ttot0 = ttot; __syncthreads(); if (threadIdx.x == threadIdx.y) { tots[threadIdx.y] = ttot; } __syncthreads(); for (int k = 1; k < blockDim.y; k *= 2) { if (threadIdx.y >= k) { if (threadIdx.x == threadIdx.y - k) { ttot += tots[threadIdx.x]; } } __syncthreads(); if (threadIdx.y >= k) { ttot = __shfl(ttot, threadIdx.y - k); if (threadIdx.x == threadIdx.y) { tots[threadIdx.y] = ttot; } } __syncthreads(); } out[i] = sum + tsum + ttot - ttot0; if (threadIdx.x == blockDim.y - 1) { ttot = tots[threadIdx.x]; } __syncthreads(); ttot = __shfl(ttot, blockDim.y - 1); sum += ttot; } } } } template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k, start, end, ij; int bid = blockIdx.y + blockIdx.z * blockDim.y; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { vmax = maxminv; imax = -1; start = jc[ij]; end = jc[ij+1]; for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[ij + m * bid] = vmax; outi[ij + m * bid] = imax; } } } } } template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k; int bid = blockIdx.y + blockIdx.z * blockDim.y; if (bid < ncols) { vmax = maxminv; imax = -1; for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? 
(vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[bid] = vmax; outi[bid] = imax; } } } } // Not very fast for wide matrices template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { T vmax, vtmp; int imax, itmp, i, j; for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) { if (ncols > 0) { vmax = in[i]; imax = 0; for (j = 1; j < ncols; j++) { vtmp = in[i + nrows * j]; itmp = j; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } out[i] = vmax; outi[i] = imax; } } } #else template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {} template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {} template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {} template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {} #endif void setinds(int ncols, int &nc1, int &nc2) { if (ncols < 65536) { nc1 = ncols; nc2 = 1; } else { nc1 = (int)sqrt((double)ncols); nc2 = 1 + (ncols-1)/nc1; } } template<class T> int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); hipLaunchKernelGGL(( __cumsumg<T>), dim3(grid),dim3(tblock), 0, 0, in, out, jc, nrows, ncols, m); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int cumsumgf(float *in, float *out, int *jc, int nrows, int ncols, int m) { return cumsumg<float>(in, out, jc, nrows, ncols, m); } int cumsumgi(int *in, int *out, int *jc, int nrows, int ncols, int m) { return cumsumg<int>(in, out, jc, nrows, ncols, m); } template<class T> int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); hipLaunchKernelGGL(( __maxming<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, jc, nrows, ncols, m, minv, dir); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } template<class T> int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(1, nc1, nc2); int ny = min(32, 1+nrows/32); dim3 tblock(32, ny, 1); hipLaunchKernelGGL(( __maxmini_cols<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, minv, dir); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } template<class T> int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { int nb = min(32,1+nrows/32); dim3 grid(nb,1,1); int ny = min(32, 1+nrows/nb/32); dim3 tblock(32, ny, 1); hipLaunchKernelGGL(( __maxmini_rows<T>), dim3(grid),dim3(tblock), 0, 0, in, out, outi, nrows, ncols, dir); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } int maxgf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<float>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1); } int maxgi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x80000000, 1); } int mingf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) { 
return maxming<float>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0); } int mingi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x7fffffff, 0); } int maxif(float *in, float *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<float>(in, out, outi, nrows, ncols, -3e38f, 1); } else if (dir == 2) { return maxmini_rows<float>(in, out, outi, nrows, ncols, 1); } else { return -1; } } int maxii(int *in, int *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x80000000, 1); } else if (dir == 2) { return maxmini_rows<int>(in, out, outi, nrows, ncols, 1); } else { return -1; } } int minif(float *in, float *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<float>(in, out, outi, nrows, ncols, 3e38f, 0); } else if (dir == 2) { return maxmini_rows<float>(in, out, outi, nrows, ncols, 0); } else { return -1; } } int minii(int *in, int *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x7fffffff, 0); } else if (dir == 2) { return maxmini_rows<int>(in, out, outi, nrows, ncols, 0); } else { return -1; } } __global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) { for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { float accum = 0.0f; for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { accum += a[tx+nrows*ty] * b[ty]; } atomicAdd(&c[tx], accum); } } #if __CUDA_ARCH__ > 200 __global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { float accum = 0.0f; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } for (int i = 1; i < blockDim.x; i *= 2) { float tmp = __shfl_down(accum, i); if (threadIdx.x + i < blockDim.x) accum += tmp; } if (threadIdx.x == 0) { atomicAdd(&c[ty], accum); } } } #else __global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { float accum = 0.0f; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } atomicAdd(&c[ty], accum); } } #endif __global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) { float accum = 0.0f; int tx = threadIdx.x + blockDim.x * blockIdx.x; if (tx < tstep) { for (; tx < nrows*ncols; tx += tstep) { int icol = tx / nrows; accum += a[tx] * b[icol]; } int irow = tx % nrows; atomicAdd(&c[irow], accum); } } int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) { if (trans == 1) { int ntx = min(32, nrows); int nty = min(32, ncols); int nbx = min(256, 1 + nrows/ntx/8); int nby = min(256, 1 + ncols/nty/2); dim3 blockdims(ntx,nty,1); dim3 griddims(nbx,nby,1); hipLaunchKernelGGL(( __dmvt), dim3(griddims),dim3(blockdims), 0, 0, a, nrows, ncols, b, c); } else { int ntx = min(1024, nrows*ncols); int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32); int tstep = (ntx*nbx/nrows)*nrows; hipLaunchKernelGGL(( __dmv0), dim3(nbx),dim3(ntx), 0, 0, a, nrows, ncols, tstep, b, c); } hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } #define 
ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \ __global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \ int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \ int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \ istart = (istart / 32) * 32; \ if (blockIdx.x != gridDim.x - 1) { \ iend = (iend / 32) * 32; \ } \ for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \ atomicAdd(&S[II + nrows * IJ], IV); \ } \ } \ int accum(TI, TJ, TV, TS, int m, int nrows) { \ int nthreads = min(512, m); \ int nblocks = max(1, min(65535, m/nthreads/8)); \ hipLaunchKernelGGL(( __accum), dim3(nblocks),dim3(nthreads), 0, 0, I,J,V,S,m,nrows); \ hipDeviceSynchronize(); \ hipError_t err = hipGetLastError(); \ return err; \ } ACCUM_KERNEL(int*I, int*J, float*V, float*S, I[i], J[i], V[i]) ACCUM_KERNEL(int*I, int J, float*V, float*S, I[i], J, V[i]) ACCUM_KERNEL(int I, int*J, float*V, float*S, I, J[i], V[i]) ACCUM_KERNEL(int*I, int*J, float V, float*S, I[i], J[i], V) ACCUM_KERNEL(int*I, int J, float V, float*S, I[i], J, V) ACCUM_KERNEL(int I, int*J, float V, float*S, I, J[i], V) ACCUM_KERNEL(int*I, int*J, int*V, int*S, I[i], J[i], V[i]) ACCUM_KERNEL(int*I, int J, int*V, int*S, I[i], J, V[i]) ACCUM_KERNEL(int I, int*J, int*V, int*S, I, J[i], V[i]) ACCUM_KERNEL(int*I, int*J, int V, int*S, I[i], J[i], V) ACCUM_KERNEL(int*I, int J, int V, int*S, I[i], J, V) ACCUM_KERNEL(int I, int*J, int V, int*S, I, J[i], V) const int INBLOCK = 4; // copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix // (and so is ncols for the output). ncols is the length of the iptrs array, which will be the number of // rows of the output matrix. iptrs specifies the columns of the input array to copy. // outstride is stride of the output matrix __global__ void __icopy_transpose(int *iptrs, float *in, float *out, int outstride, int nrows, int ncols) { __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int icopy_transpose(int *iptrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); hipError_t err; hipLaunchKernelGGL(( __icopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, iptrs, in, out, stride, nrows, ncols); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;} return 0; } // copy and transpose the input matrix into columns of the output matrix. 
nrows, ncols refer to output matrix __global__ void __ocopy_transpose(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb]; } } __syncthreads(); } } } __global__ void __ocopy_transpose_add(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]); } } __syncthreads(); } } } __global__ void __ocopy_transpose_min(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb])); } } __syncthreads(); } } } int ocopy_transpose_add(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); hipError_t err; hipLaunchKernelGGL(( __ocopy_transpose_add), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); hipError_t err; hipLaunchKernelGGL(( __ocopy_transpose), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose_min(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); hipError_t 
err; hipLaunchKernelGGL(( __ocopy_transpose_min), dim3(griddims),dim3(blockdims), 0, 0, optrs, in, out, stride, nrows, ncols); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } #ifdef TEST int main(int argc, char **argv) { int m=8, n=8, opn = 0; float *dA, *dB, *dC, *A, *B, *C; if (argc > 1) { sscanf(argv[1], "%d", &opn); if (argc > 2) { sscanf(argv[2], "%d", &m); if (argc > 3) { sscanf(argv[3], "%d", &n); } } } A = (float *)malloc(m*n*sizeof(float)); B = (float *)malloc(m*n*sizeof(float)); C = (float *)malloc(m*n*sizeof(float)); hipMalloc((void**)&dA, m*n*sizeof(float)); hipMalloc((void**)&dB, m*n*sizeof(float)); hipMalloc((void**)&dC, m*n*sizeof(float)); for (int i = 0; i < m*n; i++) { A[i] = 1.0f; B[i] = 2.0f; } hipMemcpy(dA, A, m*n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dB, B, m*n*sizeof(float), hipMemcpyHostToDevice); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); MatKernel(dA, m, n, dB, m, n, dC, opn); hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "CUDA error %d", err); exit(1); } hipMemcpy(C, dC, m*n*sizeof(float), hipMemcpyDeviceToHost); printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); if (dA != NULL) hipFree(dA); if (dB != NULL) hipFree(dB); if (dC != NULL) hipFree(dC); if (C != NULL) free(C); } #endif
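// Example invocation of the TEST harness above (illustrative sketch only: the binary name is arbitrary,
// and the build needs -DTEST plus a MatKernel() definition):
//   ./matkernel_test 2 16 16
// parses opn=2, m=16, n=16, allocates m*n floats for A (all 1.0f) and B (all 2.0f), copies them to the
// device, calls MatKernel(dA, m, n, dB, m, n, dC, opn), and prints the first four entries of A, B and C.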
2b66b9fbf3c33cd5dee1091b604966bf81a2747d.cu
#include <cuda_runtime.h> #include <stdio.h> #include <MatKernel.hpp> #if __CUDA_ARCH__ > 200 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif __device__ float op_add(float a, float b) {return a+b;} __device__ float op_sub(float a, float b) {return a-b;} __device__ float op_mul(float a, float b) {return a*b;} __device__ float op_div(float a, float b) {return a/b;} __device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;} __device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;} __device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;} __device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;} __device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;} __device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;} __device__ float op_max(float a, float b) {return max(a,b);} __device__ float op_min(float a, float b) {return min(a,b);} __device__ float op_atan2(float a, float b) {return atan2f(a, b);} __device__ float op_pow(float a, float b) {return powf(a, b);} __device__ int iop_add(int a, int b) {return a+b;} __device__ int iop_sub(int a, int b) {return a-b;} __device__ int iop_mul(int a, int b) {return a*b;} __device__ int iop_div(int a, int b) {return a/b;} __device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;} __device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;} __device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;} __device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;} __device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;} __device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;} typedef float (*optype)(float,float); typedef int (*ioptype)(int,int); __device__ const optype operators[] = { op_add, op_sub, op_mul, op_div, op_gt, op_lt, op_eq, op_ge, op_le, op_ne, op_max, op_min, op_atan2, op_pow}; __device__ const ioptype ioperators[] = { iop_add, iop_sub, iop_mul, iop_div, iop_gt, iop_lt, iop_eq, iop_ge, iop_le, iop_ne}; __device__ float fn_abs(float a) {return abs(a);} __device__ float fn_exp(float a) {return expf(a);} __device__ float fn_log(float a) {return logf(a);} __device__ float fn_expm1(float a) {return expm1f(a);} __device__ float fn_sqrt(float a) {return sqrtf(a);} __device__ float fn_ln(float a) {return logf(a);} __device__ float fn_log10(float a) {return log10f(a);} __device__ float fn_log1p(float a) {return log1pf(a);} __device__ float fn_cos(float a) {return cosf(a);} __device__ float fn_sin(float a) {return sinf(a);} __device__ float fn_tan(float a) {return tanf(a);} __device__ float fn_cosh(float a) {return coshf(a);} __device__ float fn_sinh(float a) {return sinhf(a);} __device__ float fn_tanh(float a) {return tanhf(a);} __device__ float fn_acos(float a) {return acosf(a);} __device__ float fn_asin(float a) {return asinf(a);} __device__ float fn_atan(float a) {return atanf(a);} __device__ float fn_acosh(float a) {return acoshf(a);} __device__ float fn_asinh(float a) {return asinhf(a);} __device__ float fn_atanh(float a) {return atanhf(a);} __device__ float fn_erf(float a) {return erff(a);} __device__ float fn_erfinv(float a) {return erfinvf(a);} __device__ float fn_erfc(float a) {return erfcf(a);} __device__ float fn_erfcinv(float a) {return erfcinvf(a);} __device__ float fn_gammaln(float a) {return lgammaf(a);} __device__ float fn_gamma(float a) {return tgammaf(a);} __device__ float fn_ceil(float a) {return ceilf(a);} __device__ float fn_floor(float a) {return floorf(a);} __device__ float fn_round(float a) {return roundf(a);} __device__ float 
fn_trunc(float a) {return truncf(a);} __device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);} __device__ float fn_j0(float a) {return j0f(a);} __device__ float fn_j1(float a) {return j1f(a);} //__device__ float fn_jn(float a) {return jnf(a);} __device__ float fn_y0(float a) {return y0f(a);} __device__ float fn_y1(float a) {return y1f(a);} //__device__ float fn_yn(float a) {return ynf(a);} __device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;} __device__ float fn_atan2(float a, float b) {return atan2f(a, b);} __device__ float fn_pow(float a, float b) {return powf(a, b);} typedef float (*fntype)(float); __device__ const fntype fctns[35] = { fn_abs, fn_exp, fn_expm1, fn_sqrt, fn_ln, fn_log10, fn_log1p, fn_cos, fn_sin, fn_tan, fn_cosh, fn_sinh, fn_tanh, fn_acos, fn_asin, fn_atan, fn_acosh, fn_asinh, fn_atanh, fn_erf, fn_erfinv, fn_erfc, fn_erfcinv, fn_gammaln, fn_gamma, fn_ceil, fn_floor, fn_round, fn_trunc, fn_sign, fn_j0, fn_j1, fn_y0, fn_y1, fn_exppsi}; __device__ const optype fctns2[2] = { fn_atan2, fn_pow}; __global__ void __apply_gfun(float *A, float *B, int N, int opn) { fntype fn = fctns[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = fn(A[i]); } } void setsizes(int N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 1; while (nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < 1024) { nthreads = 2*nthreads; } else { nblocks = 2*nblocks; } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } int apply_gfun(float *A, float *B, int N, int opn) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); __apply_gfun<<<griddims,nthreads>>>(A, B, N, opn); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __toFloat(int *A, float *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (float)(A[i]); } } __global__ void __toInt(float *A, int *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (int)(A[i]); } } int toFloat(int *A, float *B, int N) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); __toFloat<<<griddims,nthreads>>>(A, B, N); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int toInt(float *A, int *B, int N) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); __toInt<<<griddims,nthreads>>>(A, B, N); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) { int i, row, col; float v; int id = threadIdx.x + blockIdx.x * blockDim.x; for (i = id; i < nnz; i += blockDim.x * gridDim.x) { v = data[i]; row = ir[i]; col = ic[i]; od[row + col * nrows] = v; } } int full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) { int nblocks = min(32, 1+(nnz-1)/32); int nthreads = min(1+(nnz-1)/nblocks, 1024); __full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __initSeq(int *A, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = 
ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { A[i] = i % nrows; } } int initSeq(int *A, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); __initSeq<<<griddims,nthreads>>>(A, nrows, ncols); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_gfun2(float *A, float *B, float *C, int N, int opn) { optype fn = fctns2[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = fn(A[i], B[i]); } } int apply_gfun2(float *A, float *B, float *C, int N, int opn) { int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); __apply_gfun2<<<griddims,nthreads>>>(A, B, C, N, opn); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_full(float *A, float *B, float *C, int N, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i]); } } __global__ void __apply_right_col(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i % nrows]); } } __global__ void __apply_right_row(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i / nrows]); } } __global__ void __apply_left_col(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i % nrows],B[i]); } } __global__ void __apply_left_row(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i / nrows],B[i]); } } __global__ void __apply_right_val(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); float val = B[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],val); } } __global__ void __apply_left_val(float *A, float *B, float *C, int nrows, int ncols, int opn) { optype op = operators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); float val = A[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(val,B[i]); } } __global__ void __set_val(float *A, float val, int length) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) { A[i] = val; } } int set_val(float *A, float val, int length) { int nthreads; dim3 griddims; setsizes(length, &griddims, &nthreads); __set_val<<<griddims,nthreads>>>(A, val, length); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int set_ival(float *A, int val, int length) { int nthreads; dim3 
griddims; setsizes(length, &griddims, &nthreads); __set_val<<<griddims,nthreads>>>(A, *((float *)&val), length); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int apply_binop(float *A, int Anrows, int Ancols, float *B, int Bnrows, int Bncols, float *C, int opn) { int N = max(Anrows, Bnrows)*max(Ancols, Bncols); int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); if (Anrows == Bnrows && Ancols == Bncols) { __apply_full<<<griddims,nthreads>>>(A, B, C, N, opn); } else if (Anrows == Bnrows && Bncols == 1) { __apply_right_col<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Ancols == Bncols && Bnrows == 1) { __apply_right_row<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Anrows == Bnrows && Ancols == 1) { __apply_left_col<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } else if (Ancols == Bncols && Anrows == 1) { __apply_left_row<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } else if (Bnrows == 1 && Bncols == 1) { __apply_right_val<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Anrows == 1 && Ancols == 1) { __apply_left_val<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_full_int(int *A, int *B, int *C, int N, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i]); } } __global__ void __apply_right_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i % nrows]); } } __global__ void __apply_right_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],B[i / nrows]); } } __global__ void __apply_left_col_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i % nrows],B[i]); } } __global__ void __apply_left_row_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i / nrows],B[i]); } } __global__ void __apply_right_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int val = B[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(A[i],val); } } __global__ void __apply_left_val_int(int *A, int *B, int *C, int nrows, int ncols, int opn) { ioptype op = ioperators[opn]; int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int val = A[0]; for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { C[i] = op(val,B[i]); } } int apply_biniop(int *A, int Anrows, int Ancols, int *B, int Bnrows, int Bncols, int *C, int opn) { int N = max(Anrows, 
Bnrows)*max(Ancols, Bncols); int nthreads; dim3 griddims; setsizes(N, &griddims, &nthreads); if (Anrows == Bnrows && Ancols == Bncols) { __apply_full_int<<<griddims,nthreads>>>(A, B, C, N, opn); } else if (Anrows == Bnrows && Bncols == 1) { __apply_right_col_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Ancols == Bncols && Bnrows == 1) { __apply_right_row_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Anrows == Bnrows && Ancols == 1) { __apply_left_col_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } else if (Ancols == Bncols && Anrows == 1) { __apply_left_row_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } else if (Bnrows == 1 && Bncols == 1) { __apply_right_val_int<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); } else if (Anrows == 1 && Ancols == 1) { __apply_left_val_int<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } // Implement B[I,J] = A // indexed copy: version with one block per column #define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2D##DFNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[IEXPR + icol * ldb] = A[i + iblock * lda]; \ } \ } \ } COPYTOINDS2DA(nn,I[i],J[iblock]) COPYTOINDS2DA(xn,i,J[iblock]) COPYTOINDS2DA(nx,I[i],iblock) COPYTOINDS2DA(xx,i,iblock) // Implement B[I,J] = A // indexed copy: version with one thread per element #define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2DB##DFNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \ } \ } COPYTOINDS2DB(nn,I[irow],J[icol]) COPYTOINDS2DB(xn,irow,J[icol]) COPYTOINDS2DB(nx,I[irow],icol) COPYTOINDS2DB(xx,irow,icol) // Implement B[I,J] = A int copyToInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = min(len, max(32, min(1024, nrows))); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { __copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { __copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } // Implement B = A[I,J] // indexed copy: version with one 
block per column #define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2D##FNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[i + iblock * ldb] = A[IEXPR + icol * lda]; \ } \ } \ } COPYFROMINDS2DA(nn,I[i],J[iblock]) COPYFROMINDS2DA(xn,i,J[iblock]) COPYFROMINDS2DA(nx,I[i],iblock) COPYFROMINDS2DA(xx,i,iblock) // Implement B = A[I,J] // indexed copy: version with one thread per element #define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2DB##FNAME(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \ } \ } COPYFROMINDS2DB(nn,I[irow],J[icol]) COPYFROMINDS2DB(xn,irow,J[icol]) COPYFROMINDS2DB(nx,I[irow],icol) COPYFROMINDS2DB(xx,irow,icol) // Implement B = A[I,J] int copyFromInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = min(len, max(32, min(1024, nrows))); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { __copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { __copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { float sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[i + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[i + nrows * Bic[j]], sum); sum = 0; } } } } __global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; float sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum); sum = 0; } } } int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { if (nrows < 128) { int nt 
= max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); __dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); __dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) { __dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) { dim3 threadDim(nthreadsx, nthreadsy, 1); __dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { float aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; } atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]); } } } __global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; float aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[threadIdx.x + nrows * Bic[j]]; } atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]); } } int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); __dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); __dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Aic[i]], P[i]); } } __global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Air[i]], P[i]); } } int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) { int nthreads = min(128, nnz); int nblks = min(65536, max(1, (nnz-1) / 128)); if (n == 1) { __spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B); } else { __spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dds(int nrows, int nnz, float *A, 
float *B, int *Cir, int *Cic, float *P); __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P); __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn); __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr); #define DDS_BLKY 32 #if __CUDA_ARCH__ > 200 __global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j = jstart; j < jend ; j++) { float sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } for (int i = 1; i < blockDim.x; i *= 2) { float tmp = __shfl_down(sum, i); if (threadIdx.x + i < blockDim.x) sum = sum + tmp; } if (threadIdx.x == 0) { atomicAdd(&P[j], sum); } } } __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) { __shared__ float merge[32]; int jstart = ((long long)blockIdx.x) * ncols / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int aoff, boff; float user, prod, sum, bsum; for (int j0 = jstart; j0 < jend ; j0++) { boff = nrows * j0; user = B[tid + boff]; for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) { aoff = nrows * Cir[j]; prod = A[tid + aoff] * user; sum = prod + __shfl_down(prod, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); __syncthreads(); if (threadIdx.x == threadIdx.y) { merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.y == 0) { sum = merge[threadIdx.x]; sum = sum + __shfl_down(sum, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); if (threadIdx.x == 0) { P[j] = sum; } } } } } #else __global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { __shared__ float parts[32*DDS_BLKY]; int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j = jstart; j < jend ; j++) { float sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } parts[tid] = sum; for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) { __syncthreads(); if (i + tid < blockDim.x * blockDim.y) { parts[tid] = parts[tid] + parts[i + tid]; } } __syncthreads(); if (tid == 0) { P[j] = parts[0]; } __syncthreads(); } } __global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {} #endif int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) { dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,nnz/128)); __dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) { dim3 blockDims(32, 32, 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,ncols/64)); __dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, 
Cir, Cic, P); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) { optype op = operators[opn]; int basecol = threadIdx.y + blockDim.y * blockIdx.x; for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { float v = A[threadIdx.x + icol * nrows]; for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) { v = op(v, A[i + icol * nrows]); } for (int i = 1; i < blockDim.x; i *= 2) { v = op(v, __shfl_down(v, i)); } if (threadIdx.x == 0) { B[icol] = v; } } } #else __global__ void __reduce1op(int nrows, int ncols, float *A, float *B, int opn) { __shared__ float parts[32][33]; optype op = operators[opn]; for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { float v = A[threadIdx.x + icol * nrows]; for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) { v = op(v, A[irow + icol * nrows]); } parts[threadIdx.x][threadIdx.y] = v; for (int i = 1; i < blockDim.x; i *= 2) { if (i + threadIdx.x < blockDim.x) { parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); } } if (threadIdx.x == 0) { B[icol] = parts[0][threadIdx.y]; } __syncthreads(); } } #endif int reduce1op(int nrows, int ncols, float *A, float *B, int opn) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); __reduce1op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { optype opbf = operators[opb]; optype oprf = operators[opr]; int basecol = threadIdx.y + blockDim.y * blockIdx.x; for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { float v = 0; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows])); } for (int i = 1; i < blockDim.x; i *= 2) { v = oprf(v, __shfl_down(v, i)); } if (threadIdx.x == 0) { C[icol] = v; } } } #else __global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { __shared__ float parts[32][33]; optype opbf = operators[opb]; optype oprf = operators[opr]; for (int icol = threadIdx.y + blockIdx.y * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { float v = 0; for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) { v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); } parts[threadIdx.x][threadIdx.y] = v; for (int i = 1; i < blockDim.x; i *= 2) { if (i + threadIdx.x < blockDim.x) { parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); } } if (threadIdx.x == 0) { C[icol] = parts[0][threadIdx.y]; } __syncthreads(); } } #endif int reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); __reducebin1op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #define BLOCKDIM 32 __global__ void __transpose(float *in, int instride, float *out, int outstride, int 
nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) { int gridx = min(32, 1+(nrows-1)/256); int gridy = min(32, 1+(ncols-1)/256); const dim3 griddims(gridx, gridy, 1); const dim3 blockdims(BLOCKDIM,16,1); cudaError_t err; int dev = -1; cudaGetDevice(&dev); __transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols); return err; } return 0; } __global__ void __reduce2op(int nrows, int ncols, float *A, float *B, int opn) { __shared__ float parts[32][33]; optype op = operators[opn]; int baserow = threadIdx.x + blockDim.x * blockIdx.x; for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { float v = A[irow + threadIdx.y * nrows]; for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { v = op(v, A[irow + icol * nrows]); } parts[threadIdx.x][threadIdx.y] = v; __syncthreads(); float newv = 0; for (int i = 1; i < blockDim.y; i *= 2) { if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; __syncthreads(); if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv); __syncthreads(); } if (threadIdx.y == 0) { B[irow] = parts[threadIdx.x][0]; } __syncthreads(); } } int reduce2op(int nrows, int ncols, float *A, float *B, int opn) { int blkx = min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); __reduce2op<<<nblks,blkdims>>>(nrows, ncols, A, B, opn); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { __shared__ float parts[32][33]; optype opbf = operators[opb]; optype oprf = operators[opr]; int baserow = threadIdx.x + blockDim.x * blockIdx.x; for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]); for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); } parts[threadIdx.x][threadIdx.y] = v; __syncthreads(); float newv = 0; for (int i = 1; i < blockDim.y; i *= 2) { if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; __syncthreads(); if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv); __syncthreads(); } if (threadIdx.y == 0) { C[irow] = parts[threadIdx.x][0]; } __syncthreads(); } } int reducebin2op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr) { int blkx = 
min(32, nrows); int blky = min(32, ncols); int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); const dim3 blkdims(blkx,blky,1); __reducebin2op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __embedmat2d(float *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { float v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } b[i] = (long long)vi + (((long long)(i/nrows+1))<<32); } } __global__ void __embedmat(float *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { float v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } c[i] = (long long)vi + (((long long)b[i])<<32); } } int embedmat2d(float *a, long long *b, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); __embedmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int embedmat(float *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); __embedmat<<<griddims,nthreads>>>(a, b, c, n); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __extractmat2d(float *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&b[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((float *)&vi); } } __global__ void __extractmat(float *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&c[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((float *)&vi); b[i] = *(((int *)&c[i])+1); } } int extractmat2d(float *a, long long *b, int nrows, int ncols) { int nthreads; dim3 griddims; setsizes(nrows*ncols, &griddims, &nthreads); __extractmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int extractmat(float *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizes(n, &griddims, &nthreads); __extractmat<<<griddims,nthreads>>>(a, b, c, n); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/reverse.h> int fsort2d(float *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) { for (int i = 0; i < ncols; i++) { thrust::device_ptr<float> keys(pkeys+i*nrows); thrust::device_ptr<unsigned int> vals(pvals+i*nrows); if (asc > 0) { thrust::sort_by_key(keys, keys + nrows, vals); } else { thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<float>()); } } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int isort(int *pkeys, int N, int asc) { thrust::device_ptr<int> keys(pkeys); if (asc > 
0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<int>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int fsort(float *pkeys, int N, int asc) { thrust::device_ptr<float> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<int>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int isortk(int *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<int> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); } else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<int>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int fsorts(float *pkeys, unsigned int *pvals, int *jc, int m, int asc) { for (int i = 0; i < m; i++) { thrust::device_ptr<float> keys(pkeys + jc[i]); thrust::device_ptr<unsigned int> vals(pvals + jc[i]); int b = jc[i+1] - jc[i]; if (asc > 0) { thrust::sort_by_key(keys, keys + b, vals); } else { thrust::sort_by_key(keys, keys + b, vals, thrust::greater<float>()); } } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int dsortk(double *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<double> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); } else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<double>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int lsortk(long long *pkeys, unsigned int *pvals, int N, int asc) { thrust::device_ptr<long long> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals); } else { thrust::sort_by_key(keys, keys + N, vals, thrust::greater<long long>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int lsort(long long *pkeys, int N, int asc) { thrust::device_ptr<long long> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<long long>()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } typedef struct lll { int x; int y; int z; int w; } lllint; struct cmp_lllint_key_asc { __host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const { if (lhs.x < rhs.x) return true; if (lhs.x > rhs.x) return false; if (lhs.y < rhs.y) return true; if (lhs.y > rhs.y) return false; if (lhs.z < rhs.z) return true; if (lhs.z > rhs.z) return false; return (lhs.w < rhs.w); } }; struct cmp_lllint_key_desc { __host__ __device__ inline bool operator()(const lllint &lhs, const lllint &rhs) const { if (lhs.x > rhs.x) return true; if (lhs.x < rhs.x) return false; if (lhs.y > rhs.y) return true; if (lhs.y < rhs.y) return false; if (lhs.z > rhs.z) return true; if (lhs.z < rhs.z) return false; return (lhs.w > rhs.w); } }; int i4sort(int *pkeys0, int N, int asc) { lllint *pkeys = (lllint *)pkeys0; thrust::device_ptr<lllint> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N, cmp_lllint_key_asc()); } else { thrust::sort(keys, keys + N, cmp_lllint_key_desc()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } typedef struct i3 { int x; int y; int z; } i3struct; struct cmp_i3struct_key_asc { __host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const { if (lhs.x < rhs.x) return true; if 
(lhs.x > rhs.x) return false; if (lhs.y < rhs.y) return true; if (lhs.y > rhs.y) return false; return (lhs.z < rhs.z); } }; struct cmp_i3struct_key_desc { __host__ __device__ inline bool operator()(const i3struct &lhs, const i3struct &rhs) const { if (lhs.x > rhs.x) return true; if (lhs.x < rhs.x) return false; if (lhs.y > rhs.y) return true; if (lhs.y < rhs.y) return false; return (lhs.z > rhs.z); } }; int i3sortk(int *pkeys0, unsigned int *pvals, int N, int asc) { i3struct *pkeys = (i3struct *)pkeys0; thrust::device_ptr<i3struct> keys(pkeys); thrust::device_ptr<unsigned int> vals(pvals); if (asc > 0) { thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_asc()); } else { thrust::sort_by_key(keys, keys + N, vals, cmp_i3struct_key_desc()); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } // This path may break. If so look for radixsort_api.h in /usr/local/cuda/include // and fix the path below. using namespace thrust::system::cuda::detail::detail::b40c_thrust; int fsortsizex(int N) { RadixSortingEnactor<float,unsigned int> sorter(N); return sorter.SpineElements(); } int lsortsizex(int N) { RadixSortingEnactor<long long,unsigned int> sorter(N); return sorter.SpineElements(); } int fsort2dx(float *pkeys, unsigned int *pvals, float *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int nrows, int ncols, int asc) { int i; cudaError_t err; RadixSortingEnactor<float,unsigned int> sorter(nrows); RadixSortStorage<float,unsigned int> storage; storage.d_spine = ispine; storage.d_from_alt_storage = bflags; storage.using_alternate_storage = false; for (i = 0; i < ncols; i++) { storage.d_keys = pkeys+i*nrows; storage.d_values = pvals+i*nrows; storage.d_alt_keys = tkeys; storage.d_alt_values = tvals; if (asc == 0) { thrust::device_ptr<float> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+nrows); thrust::reverse(vals, vals+nrows); } cudaDeviceSynchronize(); sorter.EnactSort(storage); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err > 0) return err; if (asc == 0) { thrust::device_ptr<float> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+nrows); thrust::reverse(vals, vals+nrows); } cudaDeviceSynchronize(); if (storage.d_keys == tkeys) { cudaMemcpy(pkeys+i*nrows, tkeys, nrows*sizeof(float), cudaMemcpyDeviceToDevice); } if (storage.d_values == tvals) { cudaMemcpy(pvals+i*nrows, tvals, nrows*sizeof(unsigned int), cudaMemcpyDeviceToDevice); } } return err; } int lsortx(long long *pkeys, unsigned int *pvals, long long *tkeys, unsigned int *tvals, int *ispine, bool * bflags, int N, int asc) { RadixSortingEnactor<long long,unsigned int> sorter(N); RadixSortStorage<long long,unsigned int> storage; storage.d_keys = pkeys; storage.d_values = pvals; storage.d_alt_keys = tkeys; storage.d_alt_values = tvals; storage.d_spine = ispine; storage.d_from_alt_storage = bflags; if (asc == 0) { thrust::device_ptr<long long> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+N); thrust::reverse(vals, vals+N); } cudaDeviceSynchronize(); sorter.EnactSort(storage); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (asc == 0) { thrust::device_ptr<long long> keys(storage.d_keys); thrust::device_ptr<unsigned int> vals(storage.d_values); thrust::reverse(keys, keys+N); thrust::reverse(vals, vals+N); } return err; } __global__ void __stratify(float *strata, int n, float *a, float *b, unsigned int *bi, 
int stride) { __shared__ float ss[32]; __shared__ unsigned int ibin[32]; __shared__ unsigned int ebin[32]; __shared__ unsigned int todo[32]; __shared__ float bins[64][33]; __shared__ unsigned int topush; int tid = threadIdx.x; ss[tid] = strata[tid]; ibin[tid] = 0; for (int i = 0; i < n; i += blockDim.x * gridDim.x) { int ii = i + tid + blockDim.x * blockIdx.x; if (tid == 0) topush = 0; if (ii < n) { float v = a[ii]; int j = 1; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = j - 32; int k = atomicInc(&ibin[j], 256); bins[k][j] = v; if (k == 31) { k = atomicInc(&topush, 1024); todo[k] = j; } } if (ibin[tid] >= 32) { ebin[tid] = atomicAdd(&bi[tid], 32); ibin[tid] = ibin[tid] - 32; } for (int k = 0; k < topush; k++) { int j = todo[k]; b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j]; } } ebin[tid] = atomicAdd(&bi[tid], ibin[tid]); for (int j = 0; j < 32; j++) { if (tid < ibin[j]) { b[j*stride + ebin[j] + tid] = bins[tid][j]; } } } int stratify(float *strata, int n, float *a, float *b, unsigned int *bi, int stride) { __stratify<<<40,32>>>(strata, n, a, b, bi, stride); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #define SNDVALS 256 #define SNDGRPS 4 #define SNTHREADS 1024 #define SBIGBLK (4*1024) __global__ void __stratifycounts(float *strata, int n, float *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ float ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { float v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 
2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } } int stratifycounts(float *strata, int n, float *a, unsigned int *bi) { const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1); const dim3 griddims(8,1,1); __stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #define RNDVALS 256 #define RNTHREADS 256 #define RNDBITS 8 #define RBIGBLK (4*1024) __global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) { __shared__ unsigned int ic[RNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int tid = threadIdx.x; int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK); for (int i = istart; i < iend; i += RBIGBLK) { __syncthreads(); ic[threadIdx.x] = 0; __syncthreads(); for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) { float v = a[j]; unsigned char *cv = (unsigned char *)&v; atomicInc(&ic[cv[digit]], 65536*32767); } __syncthreads(); bi[bibase + threadIdx.x] = ic[threadIdx.x]; bibase += RNDVALS; } } int radixcounts(float *a, int n, int digit, unsigned int *bi) { const dim3 blockdims(RNTHREADS,1,1); const dim3 griddims(32,1,1); __radixcounts<<<griddims,blockdims>>>(a, n, digit, bi); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 #define GENDISTS(DFNAME,DFUNC) \ __global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \ int ldc, int d, int nrows, int ncols, float p) { \ int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \ int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \ float va, vb, vc; \ float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \ R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \ int xi = threadIdx.x + xblk; \ int yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ 
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ } \ yi = threadIdx.x + yblk; \ int nbr = (threadIdx.x + 1) % blockDim.x; \ for (int i = 0; i < d; i++) { \ va = (xi < nrows) ? A[xi + i * lda] : 0; \ vb = (yi < ncols) ? B[yi + i * ldb] : 0; \ vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \ vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \ vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \ vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \ vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \ vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \ vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \ vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \ vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \ vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \ vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \ vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \ vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \ vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \ vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \ vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \ } \ yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) 
{C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \ } \ } GENDISTS(__l1dist,vc+=abs(va-vb)) GENDISTS(__l2dist,vc+=(va-vb)*(va-vb)) GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p)) GENDISTS(__linfdist,vc=max(vc,abs(va-vb))) GENDISTS(__msum,vc=max(vc,va+vb)) #else __global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Lidist not supported on arch <= 200\n"); } __global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, L2dist not supported on arch <= 200\n"); } __global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Minkowski distance not supported on arch <= 200\n"); } __global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Max-abs distance not supported on arch <= 200\n"); } __global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { printf("Warning, Max-sum multiply not supported on arch <= 200\n"); } #endif int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); // cudaSetDevice(ithread); if (p == 0.0f) { __linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 1.0f) { __l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 2.0f) { __l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else { __minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int maxsumx(float *A, 
int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); __msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { __shared__ T tots[32]; int start, end, ij; int bid = blockIdx.y + blockIdx.z * blockDim.y; // column index T sum, tsum, tmp, ttot, ttot0; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { start = jc[ij] + bid * nrows; end = jc[ij+1] + bid * nrows; sum = 0; for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { tsum = in[i]; tmp = __shfl_up(tsum, 1); if (threadIdx.x >= 1) tsum += tmp; tmp = __shfl_up(tsum, 2); if (threadIdx.x >= 2) tsum += tmp; tmp = __shfl_up(tsum, 4); if (threadIdx.x >= 4) tsum += tmp; tmp = __shfl_up(tsum, 8); if (threadIdx.x >= 8) tsum += tmp; tmp = __shfl_up(tsum, 16); if (threadIdx.x >= 16) tsum += tmp; ttot = __shfl(tsum, min(end-start-1, 31)); ttot0 = ttot; __syncthreads(); if (threadIdx.x == threadIdx.y) { tots[threadIdx.y] = ttot; } __syncthreads(); for (int k = 1; k < blockDim.y; k *= 2) { if (threadIdx.y >= k) { if (threadIdx.x == threadIdx.y - k) { ttot += tots[threadIdx.x]; } } __syncthreads(); if (threadIdx.y >= k) { ttot = __shfl(ttot, threadIdx.y - k); if (threadIdx.x == threadIdx.y) { tots[threadIdx.y] = ttot; } } __syncthreads(); } out[i] = sum + tsum + ttot - ttot0; if (threadIdx.x == blockDim.y - 1) { ttot = tots[threadIdx.x]; } __syncthreads(); ttot = __shfl(ttot, blockDim.y - 1); sum += ttot; } } } } template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k, start, end, ij; int bid = blockIdx.y + blockIdx.z * blockDim.y; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { vmax = maxminv; imax = -1; start = jc[ij]; end = jc[ij+1]; for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? 
(vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[ij + m * bid] = vmax; outi[ij + m * bid] = imax; } } } } } template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k; int bid = blockIdx.y + blockIdx.z * blockDim.y; if (bid < ncols) { vmax = maxminv; imax = -1; for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[bid] = vmax; outi[bid] = imax; } } } } // Not very fast for wide matrices template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { T vmax, vtmp; int imax, itmp, i, j; for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) { if (ncols > 0) { vmax = in[i]; imax = 0; for (j = 1; j < ncols; j++) { vtmp = in[i + nrows * j]; itmp = j; if (dir ? 
(vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } out[i] = vmax; outi[i] = imax; } } } #else template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {} template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {} template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {} template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {} #endif void setinds(int ncols, int &nc1, int &nc2) { if (ncols < 65536) { nc1 = ncols; nc2 = 1; } else { nc1 = (int)sqrt((double)ncols); nc2 = 1 + (ncols-1)/nc1; } } template<class T> int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); __cumsumg<T><<<grid,tblock>>>(in, out, jc, nrows, ncols, m); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int cumsumgf(float *in, float *out, int *jc, int nrows, int ncols, int m) { return cumsumg<float>(in, out, jc, nrows, ncols, m); } int cumsumgi(int *in, int *out, int *jc, int nrows, int ncols, int m) { return cumsumg<int>(in, out, jc, nrows, ncols, m); } template<class T> int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); __maxming<T><<<grid,tblock>>>(in, out, outi, jc, nrows, ncols, m, minv, dir); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } template<class T> int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) { int nc1, nc2; setinds(ncols, nc1, nc2); dim3 grid(1, nc1, nc2); int ny = min(32, 1+nrows/32); dim3 tblock(32, ny, 1); __maxmini_cols<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, minv, dir); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } template<class T> int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { int nb = min(32,1+nrows/32); dim3 grid(nb,1,1); int ny = min(32, 1+nrows/nb/32); dim3 tblock(32, ny, 1); __maxmini_rows<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, dir); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } int maxgf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<float>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1); } int maxgi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x80000000, 1); } int mingf(float *in, float *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<float>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0); } int mingi(int *in, int *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<int>(in, out, outi, jc, nrows, ncols, m, 0x7fffffff, 0); } int maxif(float *in, float *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<float>(in, out, outi, nrows, ncols, -3e38f, 1); } else if (dir == 2) { return maxmini_rows<float>(in, out, outi, nrows, ncols, 1); } else { return -1; } } int maxii(int *in, int *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x80000000, 1); } else if (dir == 2) { return maxmini_rows<int>(in, out, 
outi, nrows, ncols, 1); } else { return -1; } } int minif(float *in, float *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<float>(in, out, outi, nrows, ncols, 3e38f, 0); } else if (dir == 2) { return maxmini_rows<float>(in, out, outi, nrows, ncols, 0); } else { return -1; } } int minii(int *in, int *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<int>(in, out, outi, nrows, ncols, 0x7fffffff, 0); } else if (dir == 2) { return maxmini_rows<int>(in, out, outi, nrows, ncols, 0); } else { return -1; } } __global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) { for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { float accum = 0.0f; for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { accum += a[tx+nrows*ty] * b[ty]; } atomicAdd(&c[tx], accum); } } #if __CUDA_ARCH__ > 200 __global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { float accum = 0.0f; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } for (int i = 1; i < blockDim.x; i *= 2) { float tmp = __shfl_down(accum, i); if (threadIdx.x + i < blockDim.x) accum += tmp; } if (threadIdx.x == 0) { atomicAdd(&c[ty], accum); } } } #else __global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { float accum = 0.0f; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } atomicAdd(&c[ty], accum); } } #endif __global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) { float accum = 0.0f; int tx = threadIdx.x + blockDim.x * blockIdx.x; if (tx < tstep) { for (; tx < nrows*ncols; tx += tstep) { int icol = tx / nrows; accum += a[tx] * b[icol]; } int irow = tx % nrows; atomicAdd(&c[irow], accum); } } int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) { if (trans == 1) { int ntx = min(32, nrows); int nty = min(32, ncols); int nbx = min(256, 1 + nrows/ntx/8); int nby = min(256, 1 + ncols/nty/2); dim3 blockdims(ntx,nty,1); dim3 griddims(nbx,nby,1); __dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c); } else { int ntx = min(1024, nrows*ncols); int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32); int tstep = (ntx*nbx/nrows)*nrows; __dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c); } cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } #define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \ __global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \ int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \ int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \ istart = (istart / 32) * 32; \ if (blockIdx.x != gridDim.x - 1) { \ iend = (iend / 32) * 32; \ } \ for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \ atomicAdd(&S[II + nrows * IJ], IV); \ } \ } \ int accum(TI, TJ, TV, TS, int m, int nrows) { \ int nthreads = min(512, m); \ int nblocks = max(1, min(65535, m/nthreads/8)); \ __accum<<<nblocks,nthreads>>>(I,J,V,S,m,nrows); \ cudaDeviceSynchronize(); \ cudaError_t err = cudaGetLastError(); \ return err; \ } ACCUM_KERNEL(int*I, int*J, float*V, float*S, I[i], J[i], V[i]) ACCUM_KERNEL(int*I, int J, float*V, 
float*S, I[i], J, V[i]) ACCUM_KERNEL(int I, int*J, float*V, float*S, I, J[i], V[i]) ACCUM_KERNEL(int*I, int*J, float V, float*S, I[i], J[i], V) ACCUM_KERNEL(int*I, int J, float V, float*S, I[i], J, V) ACCUM_KERNEL(int I, int*J, float V, float*S, I, J[i], V) ACCUM_KERNEL(int*I, int*J, int*V, int*S, I[i], J[i], V[i]) ACCUM_KERNEL(int*I, int J, int*V, int*S, I[i], J, V[i]) ACCUM_KERNEL(int I, int*J, int*V, int*S, I, J[i], V[i]) ACCUM_KERNEL(int*I, int*J, int V, int*S, I[i], J[i], V) ACCUM_KERNEL(int*I, int J, int V, int*S, I[i], J, V) ACCUM_KERNEL(int I, int*J, int V, int*S, I, J[i], V) const int INBLOCK = 4; // copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix // (and so is ncols for the output). ncols is the length of the iptrs array, which will be the number of // rows of the output matrix. iptrs specifies the columns of the input array to copy. // outstride is stride of the output matrix __global__ void __icopy_transpose(int *iptrs, float *in, float *out, int outstride, int nrows, int ncols) { __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int icopy_transpose(int *iptrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;} return 0; } // copy and transpose the input matrix into columns of the output matrix. 
nrows, ncols refer to output matrix __global__ void __ocopy_transpose(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb]; } } __syncthreads(); } } } __global__ void __ocopy_transpose_add(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]); } } __syncthreads(); } } } __global__ void __ocopy_transpose_min(int *optrs, float *in, float *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ float tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb])); } } __syncthreads(); } } } int ocopy_transpose_add(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose_min(int *optrs, float *in, float *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, 
out, stride, nrows, ncols); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } #ifdef TEST int main(int argc, char **argv) { int m=8, n=8, opn = 0; float *dA, *dB, *dC, *A, *B, *C; if (argc > 1) { sscanf(argv[1], "%d", &opn); if (argc > 2) { sscanf(argv[2], "%d", &m); if (argc > 3) { sscanf(argv[3], "%d", &n); } } } A = (float *)malloc(m*n*sizeof(float)); B = (float *)malloc(m*n*sizeof(float)); C = (float *)malloc(m*n*sizeof(float)); cudaMalloc((void**)&dA, m*n*sizeof(float)); cudaMalloc((void**)&dB, m*n*sizeof(float)); cudaMalloc((void**)&dC, m*n*sizeof(float)); for (int i = 0; i < m*n; i++) { A[i] = 1.0f; B[i] = 2.0f; } cudaMemcpy(dA, A, m*n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dB, B, m*n*sizeof(float), cudaMemcpyHostToDevice); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); MatKernel(dA, m, n, dB, m, n, dC, opn); cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "CUDA error %d", err); exit(1); } cudaMemcpy(C, dC, m*n*sizeof(float), cudaMemcpyDeviceToHost); printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); if (dA != NULL) cudaFree(dA); if (dB != NULL) cudaFree(dB); if (dC != NULL) cudaFree(dC); if (C != NULL) free(C); } #endif
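
The __transpose and __icopy_transpose kernels above stage BLOCKDIM x BLOCKDIM tiles in shared memory and pad each tile row to BLOCKDIM+1 floats, so the strided accesses made after the transpose fall into different shared-memory banks. The standalone sketch below reproduces that padded-tile pattern outside the library; the kernel name, the row-major layout, and the 32x8 launch shape are illustrative assumptions rather than part of the file above, and the host wrapper follows the same synchronize-then-check idiom used by the wrapper functions above.

// Standalone sketch (assumed names, not from the file above) of the padded
// shared-memory tile pattern used by __transpose / __icopy_transpose.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TILE 32

__global__ void tiled_transpose(const float *in, float *out, int nrows, int ncols) {
  // One extra column of padding so the column-wise reads done during the
  // write-out phase hit different shared-memory banks.
  __shared__ float tile[TILE][TILE + 1];
  int x = blockIdx.x * TILE + threadIdx.x;   // column in the input
  int y = blockIdx.y * TILE + threadIdx.y;   // row in the input
  for (int j = 0; j < TILE; j += blockDim.y) {
    if (x < ncols && (y + j) < nrows) {
      tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * ncols + x];
    }
  }
  __syncthreads();
  x = blockIdx.y * TILE + threadIdx.x;       // column in the output (= input row)
  y = blockIdx.x * TILE + threadIdx.y;       // row in the output (= input column)
  for (int j = 0; j < TILE; j += blockDim.y) {
    if (x < nrows && (y + j) < ncols) {
      out[(y + j) * nrows + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

int main() {
  const int nrows = 100, ncols = 130;        // deliberately not multiples of TILE
  const size_t bytes = (size_t)nrows * ncols * sizeof(float);
  float *h_in = (float *)malloc(bytes), *h_out = (float *)malloc(bytes);
  for (int i = 0; i < nrows * ncols; i++) h_in[i] = (float)i;

  float *d_in, *d_out;
  cudaMalloc((void**)&d_in, bytes);
  cudaMalloc((void**)&d_out, bytes);
  cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);

  dim3 block(TILE, 8, 1);
  dim3 grid((ncols + TILE - 1) / TILE, (nrows + TILE - 1) / TILE, 1);
  tiled_transpose<<<grid, block>>>(d_in, d_out, nrows, ncols);

  // Same synchronize-then-check idiom as the host wrappers above.
  cudaDeviceSynchronize();
  cudaError_t err = cudaGetLastError();

  cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
  bool ok = (err == cudaSuccess);
  for (int r = 0; r < nrows && ok; r++)
    for (int c = 0; c < ncols && ok; c++)
      ok = (h_out[c * nrows + r] == h_in[r * ncols + c]);
  printf("transpose %s\n", ok ? "OK" : "FAILED");

  cudaFree(d_in); cudaFree(d_out); free(h_in); free(h_out);
  return ok ? 0 : 1;
}
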
d90d975ebcdce2b4ce00c35fcc1b3d9433c9e8b7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void mykernel(void) {
}

int main() {
    hipLaunchKernelGGL(( mykernel), dim3(1), dim3(1), 0, 0, );
    printf("Hello, World!\n");
    return 0;
}
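
Because mykernel takes no arguments, the hipified launch above ends with an empty slot after the grid, block, shared-memory and stream parameters. For a kernel that does take arguments, hipify appends them after those four slots, as in the larger hipified files earlier in this collection. A minimal hand-written sketch of a parameterised launch on the HIP side (the scale kernel and its launch shape are assumptions for illustration, not hipify output):

// Hand-written illustration: hipLaunchKernelGGL(kernel, grid, block,
// sharedMemBytes, stream, kernel arguments...).
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 256;
    float *d = NULL;
    hipMalloc((void**)&d, n * sizeof(float));
    hipMemset(d, 0, n * sizeof(float));
    // CUDA equivalent:  scale<<<dim3(1), dim3(n)>>>(d, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d, 2.0f, n);
    hipDeviceSynchronize();
    printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));
    hipFree(d);
    return 0;
}
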
d90d975ebcdce2b4ce00c35fcc1b3d9433c9e8b7.cu
#include <stdio.h>

__global__ void mykernel(void) {
}

int main() {
    mykernel<<<1, 1>>>();
    printf("Hello, World!\n");
    return 0;
}
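
The two zeros that hipify inserted into the launch in the .hip file correspond to the optional dynamic-shared-memory and stream arguments that the CUDA triple-chevron syntax leaves implicit. Written out explicitly on the CUDA side, with an error check added in the style of the wrapper functions earlier in this collection, the same launch would look roughly like the sketch below (an illustration, not part of the original pair):

#include <cuda_runtime.h>
#include <stdio.h>

__global__ void mykernel(void) {
}

int main() {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // grid, block, dynamic shared-memory bytes, stream -- the last two map onto
    // the "0, 0" slots of hipLaunchKernelGGL in the hipified file.
    mykernel<<<dim3(1), dim3(1), 0, stream>>>();
    cudaStreamSynchronize(stream);
    printf("Hello, World! (launch status: %s)\n",
           cudaGetErrorString(cudaGetLastError()));
    cudaStreamDestroy(stream);
    return 0;
}
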
615512d544250db50d525bf4caab45e28e191552.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "include/kernel.cuh"

/* ------------------------ */
/*     DEVICE FUNCTIONS     */
/* ------------------------ */

/**
 * Checks whether a 3x3 rotation can be performed at the given position.
 *
 * @param posY
 *		Y coordinate of the pivot to check.
 *
 * @param posX
 *		X coordinate of the pivot to check.
 *
 * @param dimens
 *		Dimensions of the matrix to check.
 *
 *
 * @return
 *		true if it is possible.
 *		false if it is not.
 */
__device__ bool comprobar_giro (int posY, int posX, Dim dimens)
{
	int filas = dimens.filas,
	    cols = dimens.columnas;

	/* Checks the bounds of the rotation pivot */
	if ( ((posY + 1) >= filas)
		|| ((posX + 1) >= cols) )
	{
		return false;
	}

	if(((posY - 1) == 0) || ((posY - 1) % 3) == 0)
	{
		/* Correct position on the Y axis */
		if(((posX - 1) == 0) || ((posX - 1) % 3) == 0)
		{
			/* Correct position on the X axis */
			return true;
		}
	}

	return false;
}

/**
 * Looks for the first non-empty element above the specified position.
 * That element is also set to DIAMANTE_VACIO.
 *
 * @param matriz
 *		Matrix in which the element has to be looked for.
 *
 * @param fila_ini
 *		Row of the first element to check.
 *
 * @param columna
 *		Column to check.
 *
 * @param dimens
 *		Dimensions of the matrix.
 *
 *
 * @return
 *		The first element found, if there was one.
 *		-1 if no non-empty element was found.
 */
__device__ int buscar_lleno (int *matriz, int fila_ini, int columna, Dim dimens)
{
	int elem = -1,
	    fila = fila_ini,
	    aux;

	while ( (elem == -1) && (fila >= 0))
	{
		aux = (fila * dimens.columnas) + columna;

		if (matriz [aux] != DIAMANTE_VACIO)
		{
			elem = matriz [aux];
			matriz [aux] = DIAMANTE_VACIO;
		}

		fila--;
	}

	return elem;
}

/* ------- */
/* KERNELS */
/* ------- */

/**
 * Generates a random number based on the specified sequence and the thread index.
 *
 * @param semilla
 *		Initial element used to generate the sequence.
 *
 * @param resultado
 *		Vector in which the generated numbers will be stored.
 *
 * @param min
 *		Lower bound for the generated number (inclusive).
 *
 * @param max
 *		Upper bound for the generated number (inclusive).
 *
 * @param dimens
 *		Dimensions of the result matrix.
 */
__global__ void gen_aleat_cuda (unsigned long semilla,
				int *resultado,
				const int min, const int max,
				const Dim dimens)
{
	int rand_int,
	    fila = blockIdx.y * blockDim.y + threadIdx.y,
	    columna = blockIdx.x * blockDim.x + threadIdx.x,
	    aux = (fila * dimens.columnas) + columna;

	hiprandState_t estado;

	/* Checks the bounds of the matrix */
	if ((fila <= dimens.filas) && (columna <= dimens.columnas))
	{
		hiprand_init (semilla, aux, 0, &estado);

		/* The number is generated in floating point first (adjusting the
		bounds as specified) and converted to an integer afterwards.
		This is faster than performing the modulo operation */
		float rand_f = hiprand_uniform (&estado);

		rand_f *= (max - min + 0.999999);
		rand_f += min;

		/* Converts the float to an integer */
		rand_int = __float2int_rz (rand_f);

		/* Stores the result */
		resultado [aux] = rand_int;
	}
}

/**
 * Moves all the elements to the left of fila_bomba towards its right. When it reaches
 * the first element, it generates a new element.
 *
 * @param semilla
 *		Initial element used to generate the sequence (to create the
 *	new elements).
 *
 * @param resultado
 *		Vector holding the matrix that is going to be changed.
 *
 * @param min
 *		Lower bound for the generated number (inclusive).
 *
 * @param max
 *		Upper bound for the generated number (inclusive).
 *
 * @param dimens
 *		Dimensions of the result matrix.
 *
 *
 * @param fila_bomb
 *		Row to be removed.
 */
__global__ void eliminar_fila_cuda (unsigned long semilla,
					int *resultado,
					const int min, const int max,
					const Dim dimens,
					int fila_bomba)
{
	int columna = blockIdx.x * blockDim.x + threadIdx.x,
	    i,
	    rand_int,
	    aux;
	hiprandState_t estado;
	float rand_f;

	extern __shared__ int matriz_comp [];

	if ( (columna >= dimens.columnas)
		|| ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) )
	{
		return;
	}

	/* Copies the column into shared memory */
	for (i = 0; i <= fila_bomba; i++)
	{
		aux = (i * dimens.columnas) + columna;
		matriz_comp [aux] = resultado [aux];
	}

	/* ---- From here on, shared memory is used ---- */

	/* Swaps the elements from the current row up to the beginning */
	for (i = fila_bomba; i > 0; i--)
	{
		matriz_comp [(i * dimens.columnas) + columna]
			= matriz_comp [(i - 1) * dimens.columnas + columna];
	}

	/* Generates the last element */
	hiprand_init (semilla, columna, 0, &estado);

	/* The number is generated in floating point first (adjusting the
	bounds as specified) and converted to an integer afterwards.
	This is faster than performing the modulo operation */
	rand_f = hiprand_uniform (&estado);

	rand_f *= (max - min + 0.999999);
	rand_f += min;

	/* Converts the float to an integer */
	rand_int = __float2int_rz (rand_f);

	/* Stores the result */
	matriz_comp [columna] = rand_int;

	/* Copies the data back to global memory */
	for (i = 0; i <= fila_bomba; i++)
	{
		aux = (i * dimens.columnas) + columna;
		resultado [aux] = matriz_comp [aux];
	}
}

/**
 * Moves all the elements to the left of fila_bomba towards its right. When it reaches
 * the first element, it generates a new element.
 *
 * @param semilla
 *		Initial element used to generate the sequence (to create the
 *	new elements).
 *
 * @param resultado
 *		Vector holding the matrix that is going to be changed.
 *
 * @param min
 *		Lower bound for the generated number (inclusive).
 *
 * @param max
 *		Upper bound for the generated number (inclusive).
 *
 * @param dimens
 *		Dimensions of the result matrix.
 *
 *
 * @param fila_bomb
 *		Row to be removed.
 */
__global__ void eliminar_columna_cuda (unsigned long semilla,
					int *resultado,
					const int min, const int max,
					const Dim dimens,
					int col_bomba)
{
	int fila = blockIdx.y * blockDim.y + threadIdx.y,
	    i,
	    rand_int,
	    aux;
	hiprandState_t estado;

	extern __shared__ int matriz_comp [];
	float rand_f;

	if ( (fila >= dimens.filas)
		|| ( (blockIdx.x * blockDim.x + threadIdx.x) != 0) )
	{
		return;
	}

	/* Copies the row into shared memory */
	for (i = 0; i <= col_bomba; i++)
	{
		aux = (fila * dimens.columnas) + i;
		matriz_comp [aux] = resultado [aux];
	}

	/* ---- From here on, shared memory is used ---- */

	/* Swaps the elements from the current row up to the beginning */
	for (i = col_bomba; i > 0; i--)
	{
		aux = (fila * dimens.columnas) + i;
		matriz_comp [aux] = matriz_comp [aux - 1];
	}

	/* Generates the last element */
	hiprand_init (semilla, fila, 0, &estado);

	/* The number is generated in floating point first (adjusting the
	bounds as specified) and converted to an integer afterwards.
	This is faster than performing the modulo operation */
	rand_f = hiprand_uniform (&estado);

	rand_f *= (max - min + 0.999999);
	rand_f += min;

	/* Converts the float to an integer */
	rand_int = __float2int_rz (rand_f);

	/* Stores the result */
	matriz_comp [fila * dimens.columnas] = rand_int;

	/* Copies the data back to global memory */
	for (i = 0; i <= col_bomba; i++)
	{
		aux = (fila * dimens.columnas) + i;
		resultado [aux] = matriz_comp [aux];
	}
}

/**
 * Rotates every element it can in groups of 3x3 (bomb III).
 *
 * @param resultado
 *		Vector holding the matrix that is going to be changed.
 *
 * @param dimens
 *		Dimensions of the matrix.
 */
__global__ void girar_matriz_cuda (int *resultado, Dim dimens)
{
	int fila = blockIdx.y * blockDim.y + threadIdx.y,
	    columna = blockIdx.x * blockDim.x + threadIdx.x,
	    posY = fila - 1,
	    posX = columna - 1,
	    aux;

	extern __shared__ int matriz_comp [];

	if ((fila >= dimens.filas) || (columna >= dimens.columnas))
	{
		return;
	}

	if (comprobar_giro (fila, columna, dimens))
	{
		/* Copies the quadrant into shared memory (a 'for' loop unrolled by hand) */
		aux = (posY * dimens.columnas) + posX;
		matriz_comp [aux] = resultado [aux];
		matriz_comp [aux + 1] = resultado [aux + 1];
		matriz_comp [aux + 2] = resultado [aux + 2];

		aux = ( (posY + 1) * dimens.columnas) + posX;
		matriz_comp [aux] = resultado [aux];
		matriz_comp [aux + 1] = resultado [aux + 1];
		matriz_comp [aux + 2] = resultado [aux + 2];

		aux = ( (posY + 2) * dimens.columnas) + posX;
		matriz_comp [aux] = resultado [aux];
		matriz_comp [aux + 1] = resultado [aux + 1];
		matriz_comp [aux + 2] = resultado [aux + 2];

		/* ---- From here on, shared memory is used ---- */

		/* The swaps are performed by hand */
		aux = matriz_comp [(posY * dimens.columnas) + posX];
		/* ---- */
		matriz_comp [(posY * dimens.columnas) + posX]
			= matriz_comp [( (posY + 2) * dimens.columnas) + posX];

		matriz_comp [( (posY + 2) * dimens.columnas) + posX]
			= matriz_comp [( (posY + 2) * dimens.columnas) + posX + 2];

		matriz_comp [( (posY + 2) * dimens.columnas) + posX + 2]
			= matriz_comp [(posY * dimens.columnas) + posX + 2];

		matriz_comp [(posY * dimens.columnas) + posX + 2] = aux;
		/* ---- */
		aux = matriz_comp [(posY * dimens.columnas) + posX + 1];

		matriz_comp [(posY * dimens.columnas) + posX + 1]
			= matriz_comp [( (posY + 1) * dimens.columnas) + posX];

		matriz_comp [( (posY + 1) * dimens.columnas) + posX]
			= matriz_comp [( (posY + 2) * dimens.columnas) + posX + 1];

		matriz_comp [( (posY + 2) * dimens.columnas) + posX + 1]
			= matriz_comp [( (posY + 1) * dimens.columnas) + posX + 2];

		matriz_comp [( (posY + 1) * dimens.columnas) + posX + 2] = aux;

		/* Copies the quadrant back into global memory (a 'for' loop unrolled by hand) */
		aux = (posY * dimens.columnas) + posX;
		resultado [aux] = matriz_comp [aux];
		resultado [aux + 1] = matriz_comp [aux + 1];
		resultado [aux + 2] = matriz_comp [aux + 2];

		aux = ( (posY + 1) * dimens.columnas) + posX;
		resultado [aux] = matriz_comp [aux];
		resultado [aux + 1] = matriz_comp [aux + 1];
		resultado [aux + 2] = matriz_comp [aux + 2];

		aux = ( (posY + 2) * dimens.columnas) + posX;
		resultado [aux] = matriz_comp [aux];
		resultado [aux + 1] = matriz_comp [aux + 1];
		resultado [aux + 2] = matriz_comp [aux + 2];
	}
}

/**
 * Checks whether the row contains repeated elements.
 *
 * @param matriz
 *		Matrix with the current values of the diamonds.
 *
 * @param dimens
 *		Structure with the dimensions of the matrix.
 *
 * @param coincidencias
 *		Matrix in which it will be indicated whether there was any match.
*/ __global__ void busar_coinc_cuda_fila (int *matriz, Dim dimens, int *coincidencias) { int fila = blockIdx.y * blockDim.y + threadIdx.y, i, aux; extern __shared__ int mem_comp []; int *matriz_comp, tam_matriz = dimens.filas * dimens.columnas, *coinc_comp; if ( (fila >= dimens.filas) || ( (blockIdx.x * blockDim.x+ threadIdx.x) != 0) ) { return; } /* Obtiene los punteros a las diferentes zonas de la memoria compartida */ matriz_comp = mem_comp; coinc_comp = &mem_comp [tam_matriz]; /* Copia la fila en la memoria compartida */ for (i = 0; i < dimens.columnas; i++) { aux = (fila * dimens.columnas) + i; matriz_comp [aux] = matriz [aux]; coinc_comp [aux] = coincidencias [aux]; } /* ---- A partir de aqu, trabaja con la memoria compartida ---- */ aux = fila * dimens.columnas; /* Recorre la matriz marcando los elementos iguales consecutivos */ for (i = 0; i < (dimens.columnas - 2) ; i++) { if ( (matriz_comp [aux + i] == matriz_comp [aux + i + 1]) && (matriz_comp [aux + i] == matriz_comp [aux + i + 2]) ) { coinc_comp [aux + i] = COINCIDE; coinc_comp [aux + i + 1] = COINCIDE; coinc_comp [aux + i + 2] = COINCIDE; } } /* Copia de vuelta los resultados (slo hay que copiar la matriz con las coincidencias) */ for (i = 0; i < dimens.columnas; i++) { aux = (fila * dimens.columnas) + i; coincidencias [aux] = coinc_comp [aux]; } } /** * Comprueba si la columna contiene elementos repetidos. * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param coincidencias * Matriz en la que se va a indicar si haba alguna coincidencia. */ __global__ void busar_coinc_cuda_col (int *matriz, Dim dimens, int *coincidencias) { int columna = blockIdx.x * blockDim.x + threadIdx.x, i, aux; extern __shared__ int mem_comp []; int *matriz_comp, tam_matriz = dimens.filas * dimens.columnas, *coinc_comp; if ( (columna >= dimens.columnas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } /* Obtiene los punteros a las diferentes zonas de la memoria compartida */ matriz_comp = mem_comp; coinc_comp = &mem_comp [tam_matriz]; /* Copia la fila en la memoria compartida */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; matriz_comp [aux] = matriz [aux]; coinc_comp [aux] = coincidencias [aux]; } /* ---- A partir de aqu, trabaja con la memoria compartida ---- */ /* Recorre la matriz marcando los elementos iguales consecutivos */ for (i = 0; i < (dimens.filas - 2) ; i++) { aux = (i * dimens.columnas); if ( (matriz_comp [aux + columna] == matriz_comp [( (i + 1) * dimens.columnas) + columna]) && (matriz_comp [aux + columna] == matriz_comp [( (i + 2) * dimens.columnas) + columna]) ) { coinc_comp [aux + columna] = COINCIDE; coinc_comp [(i + 1) * dimens.columnas + columna] = COINCIDE; coinc_comp [(i + 2) * dimens.columnas + columna] = COINCIDE; } } /* Copia de vuelta los resultados (slo hay que copiar la matriz con las coincidencias) */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; coincidencias [aux] = coinc_comp [aux]; } } /** * Elimina todos los elementos que se haya visto que han coincidido. * * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param coincidencias * Matriz con las coincidencias encontradas. 
*/ __global__ void eliminar_coinc_cuda (int *matriz, Dim dimens, int *coincidencias) { int fila = blockIdx.y * blockDim.y + threadIdx.y, columna = blockIdx.x * blockDim.x + threadIdx.x; if ( (fila >= dimens.filas) || (columna >= dimens.columnas)) { return; } if (coincidencias [(fila * dimens.columnas) + columna] == COINCIDE) { matriz [(fila * dimens.columnas) + columna] = DIAMANTE_VACIO; } } /** * Comprueba todos los huecos de la columna y rellena los vacos. * * @param semilla * Elemento inicial para generar la secuencia. * * @param resultado * Vector en el que se almacenarn los nmeros generados. * * @param min * Lmite inferior para generar un nmero (inclusivo). * * @param max * Lmite superior para generar un nmero (inclusivo). * * @param dimens * Dimensiones de la matriz resultado. */ __global__ void llenar_vacios_cuda (unsigned long semilla, int *resultado, const int min, const int max, const Dim dimens) { int columna = blockIdx.x * blockDim.x + threadIdx.x, i, elem, rand_int, aux; extern __shared__ int matriz_comp []; hiprandState_t estado; float rand_f; if ( (columna >= dimens.columnas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } /* Copia la columna en la memoria compartida */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; matriz_comp [aux] = resultado [aux]; } /* ---- A partir de aqu, trabaja con la memoria compartida ---- */ /* Recorre la columna hasta encontrar un elemento vaco */ for (i = (dimens.filas - 1); i >= 0; i--) { aux = (i * dimens.columnas) + columna; elem = matriz_comp [aux]; if (elem == DIAMANTE_VACIO) { /* Busca el primer elemento que haya por encima y lo baja */ elem = buscar_lleno (matriz_comp, i, columna, dimens); if (elem == -1) { hiprand_init (semilla, i + columna, 0, &estado); /* El nmero se genera primero con coma flotante (ajustando los lmites como se haya especificado) y luego se convierte a entero. Esto es ms rpido que realizar la operacin de mdulo */ rand_f = hiprand_uniform (&estado); rand_f *= (max - min + 0.999999); rand_f += min; /* Convierte el float a entero */ rand_int = __float2int_rz (rand_f); /* Guarda el resultado */ elem = rand_int; } matriz_comp [aux] = elem; } } /* Copia de vuelta los resultados */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; resultado [aux] = matriz_comp [aux]; } } /* -------------------- */ /* FUNCIONES AUXILIARES */ /* -------------------- */ /** * Obtiene las dimensiones de los hilos necesarias para ejecutar la matriz con las * dimensiones especificadas, teniendo en cuenta las limitaciones del dispositivo. * * @param bloques * Elemento de tipo dim3 para almacenar las dimensiones de los bloques * dentro de la rejilla (2D). * * @param hilos * Elemento de tipo dim3 para almacenar las dimensiones de los hilos dentro * de los bloques (3D). * * @param tam_matriz * Estructura Dim (definida en 'commno.h') con las dimensiones de la matriz * que se desea usar en el dispositivo. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/ int obtener_dim (dim3 *bloques, dim3 *hilos, Dim tam_matriz) { hipDeviceProp_t propiedades; hipError_t err; int id_dispos = -1; /* Busca el dispositivo con versin >= 2 (para poder usar ms hilos por bloque) */ propiedades.major = 2; CUDA (err, hipChooseDevice (&id_dispos, &propiedades) ); /* Actualiza la informacin del dispositivo (chooseDevice no lo hizo correctamente) */ CUDA (err, hipGetDeviceProperties (&propiedades, id_dispos) ); imprimir (DETALLE_EXTRA, "\n -> Escogido dispositivo %d, con versin %d.%d\n\n", id_dispos, propiedades.major, propiedades.minor); hipSetDevice (id_dispos); /* Nmero constante de hilos por bloque (para versiones anteriores a Fermi, 16 hilos) */ hilos->x = (propiedades.major < 2)? 16 : 32; hilos->y = (propiedades.major < 2)? 16 : 32; hilos->z = 1; /* Se calcula el nmero de bloques que se deben utilizar */ bloques->x = ceil (((float) tam_matriz.columnas) / ((float) hilos->x)); bloques->y = ceil (((float) tam_matriz.filas) / ((float) hilos->y)); bloques->z = 1; imprimir (DETALLE_EXTRA, "Se usan bloques de %d x %d para alojar los (%d x %d)" " hilos necesarios.\n", hilos->x, hilos->y, tam_matriz.filas, tam_matriz.columnas); /* Si la matriz no cabe, se avisa */ if ((bloques->x > propiedades.maxGridSize [0]) || (bloques->y > propiedades.maxGridSize [1])) { imprimir (DETALLE_LOG, "\n -> Error: la matriz es demasiado grande " "para el dispositivo\n"); return ERR_TAM; } /* Limitacin para la prctica. Si la matriz cabe en un bloque, se divide para que ocupe 4 */ if ((tam_matriz.columnas * tam_matriz.filas) < propiedades.maxThreadsPerBlock) { hilos->x = ceil ( ((float) tam_matriz.columnas) / 2.0 ); hilos->y = ceil ( ((float) tam_matriz.filas) / 2.0 ); bloques->x = ceil (((float) tam_matriz.columnas) / ((float) hilos->x)); bloques->y = ceil (((float) tam_matriz.filas) / ((float) hilos->y)); imprimir (DETALLE_EXTRA, " --> Limitacin artificial (para la" " prctica): se usan %d x %d bloques de " " %d x %d hilos. La matriz es de %d x %d " " elementos.\n", bloques->x, bloques->y, hilos->x, hilos->y, tam_matriz.filas, tam_matriz.columnas); } return SUCCESS; } /** * Copia la informacin de la matriz de enteros (el resultado devuelto por el * dispositivo) en la matriz de juego con la que trabaja la CPU. * * @param matriz_d * Matriz de enteros con los resultados de la tarjeta. * * @param malla * Malla con la informacin del juego, cuya matriz va a ser actualizada. */ void copiar_matriz (int *matriz_d, Malla *malla) { int i, j, idx, filas = malla->dimens.filas, columnas = malla->dimens.columnas; for (i = 0; i < filas; i++) { for (j = 0; j < columnas; j++) { idx = (i * columnas) + j; malla->matriz [idx] = crear_diamante (matriz_d [idx]); } } } /** * Rellena la matriz de juego con diamantes aleatorios. * * @param malla * Estructura de tipo Malla (definida en 'common.h') con las dimensiones de * la matriz y su contenido. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si alguna funcin CUDA ha fallado. 
*/ int matriz_aleat (Malla *malla) { int max = DIAMANTE_VACIO, filas = malla->dimens.filas, columnas = malla->dimens.columnas, tam = filas * columnas; hipError_t err; dim3 bloques, hilos; int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); max = max_nv (*malla); /* Comprueba que la matriz tiene memoria reservada */ if (malla->matriz == NULL) { imprimir (DETALLE_DEBUG, "Error: la matriz no tiene memoria reservada.\n"); return ERR_MEM; } CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); /* Llama al ncleo para inicializar la secuencia de nmeros aleatorios */ obtener_dim (&bloques, &hilos, malla->dimens); /* Genera los nmeros aleatorios y los copia en la matriz */ KERNEL (err, gen_aleat_cuda, bloques, hilos, time (NULL), matriz_d, 1, max, malla->dimens ); CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); /* Se libera la memoria del dispositivo */ CUDA (err, hipFree (matriz_d) ); return SUCCESS; } /** * Funcin para ejecutar la bomba I (eliminar fila). * * @param fila_bomba * Fila que se debe eliminar (poner a DIAMANTE_VACIO). * * @param malla * Estructura con la informacin del juego. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_fila (int fila_bomba, Malla *malla) { hipError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice) ); /* Llama al ncleo para eliminar la fila */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, eliminar_fila_cuda, bloques, hilos, malla->dimens.columnas * (fila_bomba + 1) * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens, fila_bomba ); /* Copia la informacin de vuelta y libera la memoria en el dispositivo */ CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, hipFree (matriz_d) ); return SUCCESS; } /** * Funcin para ejecutar la bomba II (eliminar columna). * * @param col_bomba * Columna que se debe eliminar (poner a DIAMANTE_VACIO). * * @param malla * Estructura con la informacin del juego. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. 
* ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_columna (int col_bomba, Malla *malla) { hipError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = malla->dimens.filas; dim_matr_hilos.columnas = 1; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice) ); /* Llama al ncleo para eliminar la columna */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, eliminar_columna_cuda, bloques, hilos, malla->dimens.filas * (col_bomba + 1) * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens, col_bomba ); /* Copia la informacin de vuelta y libera la memoria en el dispositivo */ CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, hipFree (matriz_d) ); return SUCCESS; } /** * Funcin para ejecutar la bomba III (girar en grupos de 3x3). * * @param malla * Estructura con toda la informacin del juego (matriz, nivel * y dimensiones). * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_giro (Malla *malla) { hipError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0; int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice) ); /* Llama al ncleo para girar la matriz */ obtener_dim (&bloques, &hilos, malla->dimens); KERNEL_COMP (err, girar_matriz_cuda, bloques, hilos, tam * sizeof matriz_d [0], matriz_d, malla->dimens ); /* Copia la informacin de vuelta y libera la memoria en el dispositivo */ CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, hipFree (matriz_d) ); return SUCCESS; } /** * Busca coincidencias en la matriz y marca las casillas para ser eliminadas (las deja * como DIAMANTE_VACIO. * * @return * SUCCESS si todo ha salido correctamente. 
* ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int eliminar_coincidencias (Malla *malla) { hipError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0; int *matriz_d, *coincidencias_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = malla->dimens.filas; dim_matr_hilos.columnas = 1; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, hipMalloc ((void **) &coincidencias_d, tam * sizeof coincidencias_d [0]) ); CUDA (err, hipMemset (coincidencias_d, NO_COINCIDE, tam * sizeof coincidencias_d [0]) ); CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice) ); /* Llama al ncleo para comprobar la matriz */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, busar_coinc_cuda_fila, bloques, hilos, (2 * tam * sizeof matriz_d [0]), matriz_d, malla->dimens, coincidencias_d ); dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, busar_coinc_cuda_col, bloques, hilos, (2 * tam * sizeof matriz_d [0]), matriz_d, malla->dimens, coincidencias_d ); /* Utiliza la matriz con los elementos marcados para eliminarlos */ obtener_dim (&bloques, &hilos, malla->dimens); KERNEL (err, eliminar_coinc_cuda, bloques, hilos, matriz_d, malla->dimens, coincidencias_d ); /* Copia la informacin de vuelta y libera la memoria en el dispositivo */ CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, hipFree (matriz_d) ); CUDA (err, hipFree (coincidencias_d) ); return SUCCESS; } /** * Rellena los diamantes vacos en la matriz. * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/ int llenar_vacios (Malla *malla) { hipError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice) ); /* Llama al ncleo para comprobar la matriz */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, llenar_vacios_cuda, bloques, hilos, tam * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens ); /* Copia la informacin de vuelta y libera la memoria en el dispositivo */ CUDA (err, hipMemcpy (aux, matriz_d, tam * sizeof aux [0], hipMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, as que se usa un array de enteros para crear los nmeros aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, hipFree (matriz_d) ); return SUCCESS; } /* -------- */ /** * Busca las mejores jugadas por filas * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param mat2 * Matriz en la que se va a indicar los valores de las mejores * jugadas por posicion * * @param solh * Matriz que devuleve las mejores jugdas(posicion, movimiento * y valor de cada jugada). 
*/ __global__ void realizar_jugada_horizontal_cuda (int * mat1, Dim dimens, int * mat2,int * solh) { int fila = blockIdx.x * blockDim.x + threadIdx.x; int aux = fila * dimens.columnas; if ((fila >= dimens.filas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } for(int i = 0; i < dimens.filas * 4; i++){ solh[i] = 0; } //Variable para recorrer la matriz int fin = 0; int cont = 0; int mov = 1; int ref = 0; //Vareables no definitivas para guardar el movimiento que se va a realizar int sen = 0; int posY = 0; int posX = 0; //Mejor movimiento horizontal int mh_sen = 0; //Sentido del movimeinto int mh_posY = 0; int mh_posX = 0; for (int i = 0; i < dimens.columnas; i++) { ref = mat1[aux + i]; for(int j = i; j < dimens.columnas; j ++) { if(fin == 0) { if(ref == mat1[aux + j]) { //Mira si la posicion en la que esta es igual a la referencia cont ++; } else if((mov == 1)&&(fila > 0)&&(ref == mat1[aux - dimens.columnas + j])) { //Mira la posicion de arriba --> mover mat1[aux + j] arriba mov = 0; cont ++; sen = 3; posY = fila; posX = j; } else if((mov == 1)&&(fila < dimens.filas - 1)&&(ref == mat1[aux + dimens.columnas + j])) { //Mira la posicion de abajo --> mover mat1[aux + j] abajo mov = 0; cont ++; sen = 1; posY = fila;posX = j; } else if((mov == 1)&&((j + 1) < dimens.columnas)&&(ref == mat1[aux + j + 1])) { //Mirar la posicion de la derecha --> mover mat1[aux + j] derecha mov = 0; cont ++; sen = 0; posY = fila; posX = j; j++; //Pasa a comprobar la siguiente } else { fin = 1; } } } //Mirar en las posiciones de la izquierda if ((mov == 1)&&(i > 0)&&(ref == mat1 [aux - dimens.columnas + i - 1])) { //Mirar la posicion por el lado de la izquierda arriba --> mover mat[aux + i - 1] arriba mov = 0; cont ++; sen = 3; posY = fila; posX = i - 1; } else if ((mov == 1)&&(i > 0)&&(ref == mat1 [aux + dimens.columnas + i -1])) { //Mirar la posicion por el lado de la izquierda abajo --> mover mat[aux + i - 1] abajo mov = 0; cont ++; sen = 1; posY = fila; posX = i - 1; } if(solh[fila * 4] <= cont){ mh_sen = sen; if (mov == 1) sen = 4; mh_posY = posY; mh_posX = posX; } if((solh[fila * 4] == 0)||(solh [fila * 4] < cont)) { solh[fila * 4] = cont; solh[(fila * 4) + 1] = mh_sen; solh[(fila * 4) + 2] = mh_posY; solh[(fila * 4) + 3] = mh_posX; } mat2[aux + i] = cont; //Reinicia valores mov = 1; fin = 0; cont = 0; } } /** * Busca las mejores jugadas por filas * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
 */
int realizar_jugada_horizontal (Malla *malla, int *jugada)
{
	hipError_t err;
	dim3 bloques, hilos;

	int tam = malla->dimens.columnas * malla->dimens.filas;
	int *matriz_d;
	int *mov_d;
	int *aux = (int *) malloc (tam * sizeof aux [0]);

	// Vector solh: mejor jugada por fila (valor, sentido, posY, posX)
	int *vec = (int *) malloc (malla->dimens.filas * sizeof (int) * 4);
	int *vec_d;

	CUDA (err,
		hipMalloc ((void **) &vec_d, malla->dimens.filas * sizeof (int) * 4)
	);

	for (int i = 0; i < malla->dimens.filas * 4; i++)
	{
		vec [i] = 0;
	}

	//Crea un hilo por columna
	Dim dim_matr_hilos;
	dim_matr_hilos.filas = malla->dimens.filas;
	dim_matr_hilos.columnas = 1;

	//Inicializa la matriz auxiliar
	int idx;
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		for (int j = 0; j < malla->dimens.columnas; j++)
		{
			idx = (i * malla->dimens.columnas) + j;
			aux [idx] = malla->matriz [idx].id;
		}
	}

	CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]));
	CUDA (err, hipMalloc ((void **) &mov_d, tam * sizeof mov_d [0]));
	CUDA (err, hipMemset (mov_d, NO_COINCIDE, tam * sizeof mov_d [0]));
	CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice));
	// Se copia el contenido del vector completo (4 enteros por fila), no la dirección del puntero
	CUDA (err, hipMemcpy (vec_d, vec, malla->dimens.filas * sizeof (int) * 4, hipMemcpyHostToDevice));

	/* Llama al núcleo para comprobar la matriz */
	obtener_dim (&bloques, &hilos, dim_matr_hilos);

	KERNEL (err, realizar_jugada_horizontal_cuda,
		bloques, hilos,
		matriz_d, malla->dimens, mov_d, vec_d
	);

	CUDA (err, hipMemcpy (aux, mov_d, tam * sizeof aux [0], hipMemcpyDeviceToHost));
	CUDA (err, hipMemcpy (vec, vec_d, malla->dimens.filas * sizeof (int) * 4, hipMemcpyDeviceToHost));

	// Se queda con la mejor jugada de entre todas las filas
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		if (jugada [0] < vec [i * 4])
		{
			jugada [0] = vec [(i * 4)];
			jugada [1] = vec [(i * 4) + 1];
			jugada [2] = vec [(i * 4) + 2];
			jugada [3] = vec [(i * 4) + 3];
		}
	}

	//copiar_matriz (aux, malla);
	CUDA (err, hipFree (matriz_d));
	CUDA (err, hipFree (mov_d));
	CUDA (err, hipFree (vec_d));

	free (aux);
	free (vec);

	return SUCCESS;
}

/**
 * Busca las mejores jugadas por columnas
 *
 * @param mat1
 *		Matriz con los valores actuales de los diamantes.
 *
 * @param dimens
 *		Estructura con las dimensiones de la matriz.
 *
 * @param mat2
 *		Matriz en la que se va a indicar los valores de las mejores
 *	jugadas por posición.
 *
 * @param solv
 *		Matriz que devuelve las mejores jugadas (posición, movimiento
 *	y valor de cada jugada).
*/ __global__ void realizar_jugada_vertical_cuda (int * mat1, Dim dimens, int * mat2, int * solv) { int columna = blockIdx.y * blockDim.y + threadIdx.y; if ((columna >= dimens.columnas) || ( (blockIdx.x * blockDim.x + threadIdx.x) != 0) ) { return; } for(int i = 0;i < dimens.columnas * 4; i++){ solv[i] = 0; } //Variable para recorrer la matriz int fin = 0; int cont = 0; int mov = 1; int ref = 0; //Vareables no definitivas para guardar el movimiento que se va a realizar int sen = 0; int posY = 0; int posX = 0; //Mejor movimiento horizontal int mh_sen = 0; //Sentido del movimeinto int mh_posY = 0; int mh_posX = 0; for (int i = 0; i < dimens.filas; i++) { ref = mat1[dimens.columnas * i + columna]; for(int j = i; j < dimens.filas; j ++) { if(fin == 0) { if(ref == mat1[dimens.columnas * j + columna]) { //Mira si la posicion en la que esta es igual a la referencia cont ++; } else if((mov == 1)&&(columna > 0)&&(ref == mat1[dimens.columnas * j + columna - 1])) { //Mira la posicion de la izquierda --> mover mat1[dimens.columnas * j + columna - 1] izquierda mov = 0; cont ++; sen = 2; posY = j; posX = columna; } else if((mov == 1)&&(columna < dimens.columnas - 1)&&(ref == mat1[dimens.columnas * j + columna + 1])) { //Mira la posicion de la derecha --> mover mat1[dimens.columnas * j + columna + 1] derecha mov = 0; cont ++; sen = 0; posY = j;posX = columna; } else if((mov == 1)&&((j + 1) < dimens.filas)&&(ref == mat1[dimens.columnas * (j+1) + columna])) { //Mirar la posicion de abajo --> mover mat1[dimens.columnas * j + columna] abajo mov = 0; cont ++; sen = 1; posY = j; posX = columna; j++; //Pasa a comprobar la siguiente } else { fin = 1; } } } //Mirar en las posiciones de arriba if ((mov == 1)&&(columna > 0)&&(i>0)&&(ref == mat1 [dimens.columnas * (i- 1) + (columna - 1)])) { //Mirar la posicion por el lado de arriba izquierda --> mover mat[dimens.columnas * (i- 1) + (columna - 1)] izquierda mov = 0; cont ++; sen = 2; posY = i -1 ; posX =columna; } else if ((mov == 1)&&(i > 0)&&(columna + 1 < dimens.columnas)&&(ref == mat1 [dimens.columnas * (i - 1) + (columna + 1)])) { //Mirar la posicion por el lado de la arriba derecha --> mover mat[dimens.columnas * (i - 1) + (columna + 1)] derecha mov = 0; cont ++; sen = 0; posY = i - 1; posX = columna; } if(solv[columna * 4] <= cont){ mh_sen = sen; if (mov == 1) sen = 4; mh_posY = posY; mh_posX = posX; } if((solv[columna * 4] == 0)||(solv[columna * 4] < cont)) { solv[columna * 4] = cont; solv[(columna * 4) + 1] = mh_sen; solv[(columna * 4) + 2] = mh_posY; solv[(columna * 4) + 3] = mh_posX; } mat2[dimens.columnas * i + columna] = cont; //Reinicias valores mov = 1; fin = 0; cont = 0; } } /** * Busca las mejores jugadas por columnas * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algn error al obtener las caractersticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
 */
int realizar_jugada_vertical (Malla *malla, int *jugada)
{
	hipError_t err;
	dim3 bloques, hilos;

	int tam = malla->dimens.columnas * malla->dimens.filas;
	int *matriz_d;
	int *mov_d;
	int *aux = (int *) malloc (tam * sizeof aux [0]);

	// Vector solv: mejor jugada por columna (valor, sentido, posY, posX)
	int *vec = (int *) malloc (malla->dimens.columnas * sizeof (int) * 4);
	int *vec_d;

	CUDA (err,
		hipMalloc ((void **) &vec_d, malla->dimens.columnas * sizeof (int) * 4)
	);

	for (int i = 0; i < malla->dimens.columnas * 4; i++)
	{
		vec [i] = 0;
	}

	//Crea un hilo por fila
	Dim dim_matr_hilos;
	dim_matr_hilos.filas = 1;
	dim_matr_hilos.columnas = malla->dimens.columnas;

	//Inicializa la matriz auxiliar
	int idx;
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		for (int j = 0; j < malla->dimens.columnas; j++)
		{
			idx = (i * malla->dimens.columnas) + j;
			aux [idx] = malla->matriz [idx].id;
		}
	}

	CUDA (err, hipMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]));
	CUDA (err, hipMalloc ((void **) &mov_d, tam * sizeof mov_d [0]));
	CUDA (err, hipMemset (mov_d, NO_COINCIDE, tam * sizeof mov_d [0]));
	CUDA (err, hipMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], hipMemcpyHostToDevice));
	// Se copia el contenido del vector completo (4 enteros por columna), no la dirección del puntero
	CUDA (err, hipMemcpy (vec_d, vec, malla->dimens.columnas * sizeof (int) * 4, hipMemcpyHostToDevice));

	obtener_dim (&bloques, &hilos, dim_matr_hilos);

	KERNEL (err, realizar_jugada_vertical_cuda,
		bloques, hilos,
		matriz_d, malla->dimens, mov_d, vec_d
	);

	CUDA (err, hipMemcpy (aux, mov_d, tam * sizeof aux [0], hipMemcpyDeviceToHost));
	CUDA (err, hipMemcpy (vec, vec_d, malla->dimens.columnas * sizeof (int) * 4, hipMemcpyDeviceToHost));

	// Se queda con la mejor jugada de entre todas las columnas
	for (int i = 0; i < malla->dimens.columnas; i++)
	{
		if (jugada [0] < vec [i * 4])
		{
			jugada [0] = vec [(i * 4)];
			jugada [1] = vec [(i * 4) + 1];
			jugada [2] = vec [(i * 4) + 2];
			jugada [3] = vec [(i * 4) + 3];
		}
	}

	CUDA (err, hipFree (matriz_d));
	CUDA (err, hipFree (mov_d));
	CUDA (err, hipFree (vec_d));

	free (aux);
	free (vec);

	return SUCCESS;
}

int realizar_jugada (Malla *malla)
{
	/* Se inicializan a 0 para que la comparación con la mejor jugada encontrada sea válida */
	int jugada_v [4] = {0, 0, 0, 0};
	int jugada_h [4] = {0, 0, 0, 0};
	int posY;
	int posX;
	int mov;

	realizar_jugada_vertical (malla, jugada_v);
	realizar_jugada_horizontal (malla, jugada_h);

	if (jugada_v [0] > jugada_h [0])
	{
		printf ("Mejor jugada --> Mov(%d): %d PosY: %d PosX: %d\n",
			jugada_v [0], jugada_v [1], jugada_v [2], jugada_v [3]);
		posY = jugada_v [2];
		posX = jugada_v [3];
		mov = jugada_v [1];
	}
	else
	{
		printf ("Mejor jugada --> Mov(%d): %d PosY: %d PosX: %d\n",
			jugada_h [0], jugada_h [1], jugada_h [2], jugada_h [3]);
		posY = jugada_h [2];
		posX = jugada_h [3];
		mov = jugada_h [1];
	}

	mover_diamante (posY, posX, mov, *malla);

	return SUCCESS;
}
615512d544250db50d525bf4caab45e28e191552.cu
#include "include/kernel.cuh" /* ------------------------ */ /* FUNCIONES DE DISPOSITIVO */ /* ------------------------ */ /** * Comprueba si es posible realizar un giro de 3x3 en la posición dada. * * @param posY * Coordenada Y del eje a comprobar. * * @param posX * Coordenada X del eje a comprobar. * * @param dimens * Dimensiones de la matriz a comprobar. * * * @return * true si es posible. * false si no lo es. */ __device__ bool comprobar_giro (int posY, int posX, Dim dimens) { int filas = dimens.filas, cols = dimens.columnas; /* Comprueba los límites del eje de giro */ if ( ((posY + 1) >= filas) || ((posX + 1) >= cols) ) { return false; } if(((posY - 1) == 0) || ((posY - 1) % 3) == 0) { /* Posición correcta para el eje Y */ if(((posX - 1) == 0) || ((posX - 1) % 3) == 0) { /* Posición correcta para el eje X */ return true; } } return false; } /** * Busca el primer elemento no vacío por encima de la posición especificada. * Además, este elemento se convierte a DIAMANTE_VACIO. * * @param matriz * Matriz en la que se ha de buscar el elemento. * * @param fila_ini * Fila del primer elemento a comprobar. * * @param columna * Columna a comprobar. * * @param dimens * Dimensiones de la matriz. * * * @return * El primer elemento encontrado, si había alguno. * -1 si no se encontró ningún elemento no vacío. */ __device__ int buscar_lleno (int *matriz, int fila_ini, int columna, Dim dimens) { int elem = -1, fila = fila_ini, aux; while ( (elem == -1) && (fila >= 0)) { aux = (fila * dimens.columnas) + columna; if (matriz [aux] != DIAMANTE_VACIO) { elem = matriz [aux]; matriz [aux] = DIAMANTE_VACIO; } fila--; } return elem; } /* ------- */ /* NÚCLEOS */ /* ------- */ /** * Genera un número aleatorio en base a la secuencia especificada y al índice del hilo. * * @param semilla * Elemento inicial para generar la secuencia. * * @param resultado * Vector en el que se almacenarán los números generados. * * @param min * Límite inferior para generar un número (inclusivo). * * @param max * Límite superior para generar un número (inclusivo). * * @param dimens * Dimensiones de la matriz resultado. */ __global__ void gen_aleat_cuda (unsigned long semilla, int *resultado, const int min, const int max, const Dim dimens) { int rand_int, fila = blockIdx.y * blockDim.y + threadIdx.y, columna = blockIdx.x * blockDim.x + threadIdx.x, aux = (fila * dimens.columnas) + columna; curandState estado; /* Comprueba los límites de la matriz */ if ((fila <= dimens.filas) && (columna <= dimens.columnas)) { curand_init (semilla, aux, 0, &estado); /* El número se genera primero con coma flotante (ajustando los límites como se haya especificado) y luego se convierte a entero. Esto es más rápido que realizar la operación de módulo */ float rand_f = curand_uniform (&estado); rand_f *= (max - min + 0.999999); rand_f += min; /* Convierte el float a entero */ rand_int = __float2int_rz (rand_f); /* Guarda el resultado */ resultado [aux] = rand_int; } } /** * Mueve todos los elementos a la izquierda de fila_bomba hacia su derecha. Cuando llega * al primer elemento, genera un nuevo elemento. * * @param semilla * Elemento inicial para generar la secuencia (para crear los * nuevos elementos). * * @param resultado * Vector que almacena la matriz que va a ser cambiada. * * @param min * Límite inferior para generar un número (inclusivo). * * @param max * Límite superior para generar un número (inclusivo). * * @param dimens * Dimensiones de la matriz resultado. * * * @param fila_bomb * Fila a eliminar. 
*/ __global__ void eliminar_fila_cuda (unsigned long semilla, int *resultado, const int min, const int max, const Dim dimens, int fila_bomba) { int columna = blockIdx.x * blockDim.x + threadIdx.x, i, rand_int, aux; curandState estado; float rand_f; extern __shared__ int matriz_comp []; if ( (columna >= dimens.columnas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } /* Copia la columna en la memoria compartida */ for (i = 0; i <= fila_bomba; i++) { aux = (i * dimens.columnas) + columna; matriz_comp [aux] = resultado [aux]; } /* ---- A partir de aquí, trabaja con la memoria compartida ---- */ /* Intercambia los elementos desde la fila actual hasta el principio */ for (i = fila_bomba; i > 0; i--) { matriz_comp [(i * dimens.columnas) + columna] = matriz_comp [(i - 1) * dimens.columnas + columna]; } /* Genera el último elemento */ curand_init (semilla, columna, 0, &estado); /* El número se genera primero con coma flotante (ajustando los límites como se haya especificado) y luego se convierte a entero. Esto es más rápido que realizar la operación de módulo */ rand_f = curand_uniform (&estado); rand_f *= (max - min + 0.999999); rand_f += min; /* Convierte el float a entero */ rand_int = __float2int_rz (rand_f); /* Guarda el resultado */ matriz_comp [columna] = rand_int; /* Copia los datos de vuelta a la memoria global */ for (i = 0; i <= fila_bomba; i++) { aux = (i * dimens.columnas) + columna; resultado [aux] = matriz_comp [aux]; } } /** * Mueve todos los elementos a la izquierda de fila_bomba hacia su derecha. Cuando llega * al primer elemento, genera un nuevo elemento. * * @param semilla * Elemento inicial para generar la secuencia (para crear los * nuevos elementos). * * @param resultado * Vector que almacena la matriz que va a ser cambiada. * * @param min * Límite inferior para generar un número (inclusivo). * * @param max * Límite superior para generar un número (inclusivo). * * @param dimens * Dimensiones de la matriz resultado. * * * @param fila_bomb * Fila a eliminar. */ __global__ void eliminar_columna_cuda (unsigned long semilla, int *resultado, const int min, const int max, const Dim dimens, int col_bomba) { int fila = blockIdx.y * blockDim.y + threadIdx.y, i, rand_int, aux; curandState estado; extern __shared__ int matriz_comp []; float rand_f; if ( (fila >= dimens.filas) || ( (blockIdx.x * blockDim.x + threadIdx.x) != 0) ) { return; } /* Copia la fila en la memoria compartida */ for (i = 0; i <= col_bomba; i++) { aux = (fila * dimens.columnas) + i; matriz_comp [aux] = resultado [aux]; } /* ---- A partir de aquí, trabaja con la memoria compartida ---- */ /* Intercambia los elementos desde la fila actual hasta el principio */ for (i = col_bomba; i > 0; i--) { aux = (fila * dimens.columnas) + i; matriz_comp [aux] = matriz_comp [aux - 1]; } /* Genera el último elemento */ curand_init (semilla, fila, 0, &estado); /* El número se genera primero con coma flotante (ajustando los límites como se haya especificado) y luego se convierte a entero. Esto es más rápido que realizar la operación de módulo */ rand_f = curand_uniform (&estado); rand_f *= (max - min + 0.999999); rand_f += min; /* Convierte el float a entero */ rand_int = __float2int_rz (rand_f); /* Guarda el resultado */ matriz_comp [fila * dimens.columnas] = rand_int; /* Copia los datos de vuelta a la memoria global */ for (i = 0; i <= col_bomba; i++) { aux = (fila * dimens.columnas) + i; resultado [aux] = matriz_comp [aux]; } } /** * Gira todos los elementos posibles en grupos de 3x3 (bomba III). 
* * @param resultado * Vector que almacena la matriz que va a ser cambiada. * * @param dimens * Dimensiones de la matriz. */ __global__ void girar_matriz_cuda (int *resultado, Dim dimens) { int fila = blockIdx.y * blockDim.y + threadIdx.y, columna = blockIdx.x * blockDim.x + threadIdx.x, posY = fila - 1, posX = columna - 1, aux; extern __shared__ int matriz_comp []; if ((fila >= dimens.filas) || (columna >= dimens.columnas)) { return; } if (comprobar_giro (fila, columna, dimens)) { /* Copia el cuadrante en la memoria compartida (desenrrollamiento de un bucle 'for') */ aux = (posY * dimens.columnas) + posX; matriz_comp [aux] = resultado [aux]; matriz_comp [aux + 1] = resultado [aux + 1]; matriz_comp [aux + 2] = resultado [aux + 2]; aux = ( (posY + 1) * dimens.columnas) + posX; matriz_comp [aux] = resultado [aux]; matriz_comp [aux + 1] = resultado [aux + 1]; matriz_comp [aux + 2] = resultado [aux + 2]; aux = ( (posY + 2) * dimens.columnas) + posX; matriz_comp [aux] = resultado [aux]; matriz_comp [aux + 1] = resultado [aux + 1]; matriz_comp [aux + 2] = resultado [aux + 2]; /* ---- A partir de aquí, se usa la memoria compartida ---- */ /* Se realizan los intercambios de manera manual */ aux = matriz_comp [(posY * dimens.columnas) + posX]; /* ---- */ matriz_comp [(posY * dimens.columnas) + posX] = matriz_comp [( (posY + 2) * dimens.columnas) + posX]; matriz_comp [( (posY + 2) * dimens.columnas) + posX] = matriz_comp [( (posY + 2) * dimens.columnas) + posX + 2]; matriz_comp [( (posY + 2) * dimens.columnas) + posX + 2] = matriz_comp [(posY * dimens.columnas) + posX + 2]; matriz_comp [(posY * dimens.columnas) + posX + 2] = aux; /* ---- */ aux = matriz_comp [(posY * dimens.columnas) + posX + 1]; matriz_comp [(posY * dimens.columnas) + posX + 1] = matriz_comp [( (posY + 1) * dimens.columnas) + posX]; matriz_comp [( (posY + 1) * dimens.columnas) + posX] = matriz_comp [( (posY + 2) * dimens.columnas) + posX + 1]; matriz_comp [( (posY + 2) * dimens.columnas) + posX + 1] = matriz_comp [( (posY + 1) * dimens.columnas) + posX + 2]; matriz_comp [( (posY + 1) * dimens.columnas) + posX + 2] = aux; /* Copia el cuadrante de nuevo en memoria global (desenrrollamiento de un bucle 'for') */ aux = (posY * dimens.columnas) + posX; resultado [aux] = matriz_comp [aux]; resultado [aux + 1] = matriz_comp [aux + 1]; resultado [aux + 2] = matriz_comp [aux + 2]; aux = ( (posY + 1) * dimens.columnas) + posX; resultado [aux] = matriz_comp [aux]; resultado [aux + 1] = matriz_comp [aux + 1]; resultado [aux + 2] = matriz_comp [aux + 2]; aux = ( (posY + 2) * dimens.columnas) + posX; resultado [aux] = matriz_comp [aux]; resultado [aux + 1] = matriz_comp [aux + 1]; resultado [aux + 2] = matriz_comp [aux + 2]; } } /** * Comprueba si la fila contiene elementos repetidos. * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param coincidencias * Matriz en la que se va a indicar si había alguna coincidencia. 
*/ __global__ void busar_coinc_cuda_fila (int *matriz, Dim dimens, int *coincidencias) { int fila = blockIdx.y * blockDim.y + threadIdx.y, i, aux; extern __shared__ int mem_comp []; int *matriz_comp, tam_matriz = dimens.filas * dimens.columnas, *coinc_comp; if ( (fila >= dimens.filas) || ( (blockIdx.x * blockDim.x+ threadIdx.x) != 0) ) { return; } /* Obtiene los punteros a las diferentes zonas de la memoria compartida */ matriz_comp = mem_comp; coinc_comp = &mem_comp [tam_matriz]; /* Copia la fila en la memoria compartida */ for (i = 0; i < dimens.columnas; i++) { aux = (fila * dimens.columnas) + i; matriz_comp [aux] = matriz [aux]; coinc_comp [aux] = coincidencias [aux]; } /* ---- A partir de aquí, trabaja con la memoria compartida ---- */ aux = fila * dimens.columnas; /* Recorre la matriz marcando los elementos iguales consecutivos */ for (i = 0; i < (dimens.columnas - 2) ; i++) { if ( (matriz_comp [aux + i] == matriz_comp [aux + i + 1]) && (matriz_comp [aux + i] == matriz_comp [aux + i + 2]) ) { coinc_comp [aux + i] = COINCIDE; coinc_comp [aux + i + 1] = COINCIDE; coinc_comp [aux + i + 2] = COINCIDE; } } /* Copia de vuelta los resultados (sólo hay que copiar la matriz con las coincidencias) */ for (i = 0; i < dimens.columnas; i++) { aux = (fila * dimens.columnas) + i; coincidencias [aux] = coinc_comp [aux]; } } /** * Comprueba si la columna contiene elementos repetidos. * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param coincidencias * Matriz en la que se va a indicar si había alguna coincidencia. */ __global__ void busar_coinc_cuda_col (int *matriz, Dim dimens, int *coincidencias) { int columna = blockIdx.x * blockDim.x + threadIdx.x, i, aux; extern __shared__ int mem_comp []; int *matriz_comp, tam_matriz = dimens.filas * dimens.columnas, *coinc_comp; if ( (columna >= dimens.columnas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } /* Obtiene los punteros a las diferentes zonas de la memoria compartida */ matriz_comp = mem_comp; coinc_comp = &mem_comp [tam_matriz]; /* Copia la fila en la memoria compartida */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; matriz_comp [aux] = matriz [aux]; coinc_comp [aux] = coincidencias [aux]; } /* ---- A partir de aquí, trabaja con la memoria compartida ---- */ /* Recorre la matriz marcando los elementos iguales consecutivos */ for (i = 0; i < (dimens.filas - 2) ; i++) { aux = (i * dimens.columnas); if ( (matriz_comp [aux + columna] == matriz_comp [( (i + 1) * dimens.columnas) + columna]) && (matriz_comp [aux + columna] == matriz_comp [( (i + 2) * dimens.columnas) + columna]) ) { coinc_comp [aux + columna] = COINCIDE; coinc_comp [(i + 1) * dimens.columnas + columna] = COINCIDE; coinc_comp [(i + 2) * dimens.columnas + columna] = COINCIDE; } } /* Copia de vuelta los resultados (sólo hay que copiar la matriz con las coincidencias) */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; coincidencias [aux] = coinc_comp [aux]; } } /** * Elimina todos los elementos que se haya visto que han coincidido. * * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param coincidencias * Matriz con las coincidencias encontradas. 
*/ __global__ void eliminar_coinc_cuda (int *matriz, Dim dimens, int *coincidencias) { int fila = blockIdx.y * blockDim.y + threadIdx.y, columna = blockIdx.x * blockDim.x + threadIdx.x; if ( (fila >= dimens.filas) || (columna >= dimens.columnas)) { return; } if (coincidencias [(fila * dimens.columnas) + columna] == COINCIDE) { matriz [(fila * dimens.columnas) + columna] = DIAMANTE_VACIO; } } /** * Comprueba todos los huecos de la columna y rellena los vacíos. * * @param semilla * Elemento inicial para generar la secuencia. * * @param resultado * Vector en el que se almacenarán los números generados. * * @param min * Límite inferior para generar un número (inclusivo). * * @param max * Límite superior para generar un número (inclusivo). * * @param dimens * Dimensiones de la matriz resultado. */ __global__ void llenar_vacios_cuda (unsigned long semilla, int *resultado, const int min, const int max, const Dim dimens) { int columna = blockIdx.x * blockDim.x + threadIdx.x, i, elem, rand_int, aux; extern __shared__ int matriz_comp []; curandState estado; float rand_f; if ( (columna >= dimens.columnas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } /* Copia la columna en la memoria compartida */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; matriz_comp [aux] = resultado [aux]; } /* ---- A partir de aquí, trabaja con la memoria compartida ---- */ /* Recorre la columna hasta encontrar un elemento vacío */ for (i = (dimens.filas - 1); i >= 0; i--) { aux = (i * dimens.columnas) + columna; elem = matriz_comp [aux]; if (elem == DIAMANTE_VACIO) { /* Busca el primer elemento que haya por encima y lo baja */ elem = buscar_lleno (matriz_comp, i, columna, dimens); if (elem == -1) { curand_init (semilla, i + columna, 0, &estado); /* El número se genera primero con coma flotante (ajustando los límites como se haya especificado) y luego se convierte a entero. Esto es más rápido que realizar la operación de módulo */ rand_f = curand_uniform (&estado); rand_f *= (max - min + 0.999999); rand_f += min; /* Convierte el float a entero */ rand_int = __float2int_rz (rand_f); /* Guarda el resultado */ elem = rand_int; } matriz_comp [aux] = elem; } } /* Copia de vuelta los resultados */ for (i = 0; i < dimens.filas; i++) { aux = (i * dimens.columnas) + columna; resultado [aux] = matriz_comp [aux]; } } /* -------------------- */ /* FUNCIONES AUXILIARES */ /* -------------------- */ /** * Obtiene las dimensiones de los hilos necesarias para ejecutar la matriz con las * dimensiones especificadas, teniendo en cuenta las limitaciones del dispositivo. * * @param bloques * Elemento de tipo dim3 para almacenar las dimensiones de los bloques * dentro de la rejilla (2D). * * @param hilos * Elemento de tipo dim3 para almacenar las dimensiones de los hilos dentro * de los bloques (3D). * * @param tam_matriz * Estructura Dim (definida en 'commno.h') con las dimensiones de la matriz * que se desea usar en el dispositivo. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algún error al obtener las características del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/ int obtener_dim (dim3 *bloques, dim3 *hilos, Dim tam_matriz) { cudaDeviceProp propiedades; cudaError_t err; int id_dispos = -1; /* Busca el dispositivo con versión >= 2 (para poder usar más hilos por bloque) */ propiedades.major = 2; CUDA (err, cudaChooseDevice (&id_dispos, &propiedades) ); /* Actualiza la información del dispositivo (chooseDevice no lo hizo correctamente) */ CUDA (err, cudaGetDeviceProperties (&propiedades, id_dispos) ); imprimir (DETALLE_EXTRA, "\n -> Escogido dispositivo %d, con versión %d.%d\n\n", id_dispos, propiedades.major, propiedades.minor); cudaSetDevice (id_dispos); /* Número constante de hilos por bloque (para versiones anteriores a Fermi, 16 hilos) */ hilos->x = (propiedades.major < 2)? 16 : 32; hilos->y = (propiedades.major < 2)? 16 : 32; hilos->z = 1; /* Se calcula el número de bloques que se deben utilizar */ bloques->x = ceil (((float) tam_matriz.columnas) / ((float) hilos->x)); bloques->y = ceil (((float) tam_matriz.filas) / ((float) hilos->y)); bloques->z = 1; imprimir (DETALLE_EXTRA, "Se usan bloques de %d x %d para alojar los (%d x %d)" " hilos necesarios.\n", hilos->x, hilos->y, tam_matriz.filas, tam_matriz.columnas); /* Si la matriz no cabe, se avisa */ if ((bloques->x > propiedades.maxGridSize [0]) || (bloques->y > propiedades.maxGridSize [1])) { imprimir (DETALLE_LOG, "\n -> Error: la matriz es demasiado grande " "para el dispositivo\n"); return ERR_TAM; } /* Limitación para la práctica. Si la matriz cabe en un bloque, se divide para que ocupe 4 */ if ((tam_matriz.columnas * tam_matriz.filas) < propiedades.maxThreadsPerBlock) { hilos->x = ceil ( ((float) tam_matriz.columnas) / 2.0 ); hilos->y = ceil ( ((float) tam_matriz.filas) / 2.0 ); bloques->x = ceil (((float) tam_matriz.columnas) / ((float) hilos->x)); bloques->y = ceil (((float) tam_matriz.filas) / ((float) hilos->y)); imprimir (DETALLE_EXTRA, " --> Limitación artificial (para la" " práctica): se usan %d x %d bloques de " " %d x %d hilos. La matriz es de %d x %d " " elementos.\n", bloques->x, bloques->y, hilos->x, hilos->y, tam_matriz.filas, tam_matriz.columnas); } return SUCCESS; } /** * Copia la información de la matriz de enteros (el resultado devuelto por el * dispositivo) en la matriz de juego con la que trabaja la CPU. * * @param matriz_d * Matriz de enteros con los resultados de la tarjeta. * * @param malla * Malla con la información del juego, cuya matriz va a ser actualizada. */ void copiar_matriz (int *matriz_d, Malla *malla) { int i, j, idx, filas = malla->dimens.filas, columnas = malla->dimens.columnas; for (i = 0; i < filas; i++) { for (j = 0; j < columnas; j++) { idx = (i * columnas) + j; malla->matriz [idx] = crear_diamante (matriz_d [idx]); } } } /** * Rellena la matriz de juego con diamantes aleatorios. * * @param malla * Estructura de tipo Malla (definida en 'common.h') con las dimensiones de * la matriz y su contenido. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si alguna función CUDA ha fallado. 
*/ int matriz_aleat (Malla *malla) { int max = DIAMANTE_VACIO, filas = malla->dimens.filas, columnas = malla->dimens.columnas, tam = filas * columnas; cudaError_t err; dim3 bloques, hilos; int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); max = max_nv (*malla); /* Comprueba que la matriz tiene memoria reservada */ if (malla->matriz == NULL) { imprimir (DETALLE_DEBUG, "Error: la matriz no tiene memoria reservada.\n"); return ERR_MEM; } CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); /* Llama al núcleo para inicializar la secuencia de números aleatorios */ obtener_dim (&bloques, &hilos, malla->dimens); /* Genera los números aleatorios y los copia en la matriz */ KERNEL (err, gen_aleat_cuda, bloques, hilos, time (NULL), matriz_d, 1, max, malla->dimens ); CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); /* Se libera la memoria del dispositivo */ CUDA (err, cudaFree (matriz_d) ); return SUCCESS; } /** * Función para ejecutar la bomba I (eliminar fila). * * @param fila_bomba * Fila que se debe eliminar (poner a DIAMANTE_VACIO). * * @param malla * Estructura con la información del juego. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algún error al obtener las características del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_fila (int fila_bomba, Malla *malla) { cudaError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) ); /* Llama al núcleo para eliminar la fila */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, eliminar_fila_cuda, bloques, hilos, malla->dimens.columnas * (fila_bomba + 1) * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens, fila_bomba ); /* Copia la información de vuelta y libera la memoria en el dispositivo */ CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, cudaFree (matriz_d) ); return SUCCESS; } /** * Función para ejecutar la bomba II (eliminar columna). * * @param col_bomba * Columna que se debe eliminar (poner a DIAMANTE_VACIO). * * @param malla * Estructura con la información del juego. * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algún error al obtener las características del * dispositivo. 
* ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_columna (int col_bomba, Malla *malla) { cudaError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = malla->dimens.filas; dim_matr_hilos.columnas = 1; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) ); /* Llama al núcleo para eliminar la columna */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, eliminar_columna_cuda, bloques, hilos, malla->dimens.filas * (col_bomba + 1) * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens, col_bomba ); /* Copia la información de vuelta y libera la memoria en el dispositivo */ CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, cudaFree (matriz_d) ); return SUCCESS; } /** * Función para ejecutar la bomba III (girar en grupos de 3x3). * * @param malla * Estructura con toda la información del juego (matriz, nivel * y dimensiones). * * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algún error al obtener las características del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int bomba_giro (Malla *malla) { cudaError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0; int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) ); /* Llama al núcleo para girar la matriz */ obtener_dim (&bloques, &hilos, malla->dimens); KERNEL_COMP (err, girar_matriz_cuda, bloques, hilos, tam * sizeof matriz_d [0], matriz_d, malla->dimens ); /* Copia la información de vuelta y libera la memoria en el dispositivo */ CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, cudaFree (matriz_d) ); return SUCCESS; } /** * Busca coincidencias en la matriz y marca las casillas para ser eliminadas (las deja * como DIAMANTE_VACIO. * * @return * SUCCESS si todo ha salido correctamente. 
* ERR_CUDA si hubo algún error al obtener las características del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. */ int eliminar_coincidencias (Malla *malla) { cudaError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0; int *matriz_d, *coincidencias_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = malla->dimens.filas; dim_matr_hilos.columnas = 1; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, cudaMalloc ((void **) &coincidencias_d, tam * sizeof coincidencias_d [0]) ); CUDA (err, cudaMemset (coincidencias_d, NO_COINCIDE, tam * sizeof coincidencias_d [0]) ); CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) ); /* Llama al núcleo para comprobar la matriz */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, busar_coinc_cuda_fila, bloques, hilos, (2 * tam * sizeof matriz_d [0]), matriz_d, malla->dimens, coincidencias_d ); dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, busar_coinc_cuda_col, bloques, hilos, (2 * tam * sizeof matriz_d [0]), matriz_d, malla->dimens, coincidencias_d ); /* Utiliza la matriz con los elementos marcados para eliminarlos */ obtener_dim (&bloques, &hilos, malla->dimens); KERNEL (err, eliminar_coinc_cuda, bloques, hilos, matriz_d, malla->dimens, coincidencias_d ); /* Copia la información de vuelta y libera la memoria en el dispositivo */ CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, cudaFree (matriz_d) ); CUDA (err, cudaFree (coincidencias_d) ); return SUCCESS; } /** * Rellena los diamantes vacíos en la matriz. * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo algún error al obtener las características del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/ int llenar_vacios (Malla *malla) { cudaError_t err; dim3 bloques, hilos; int tam = malla->dimens.filas * malla->dimens.columnas, i, j, idx = 0, max = max_nv (*malla); int *matriz_d, *aux = (int *) malloc (tam * sizeof aux [0]); /* Dimensiones para luego crear un hilo por columna */ Dim dim_matr_hilos; dim_matr_hilos.filas = 1; dim_matr_hilos.columnas = malla->dimens.columnas; /* Inicializa la matriz auxiliar */ for (i = 0; i < malla->dimens.filas; i++) { for (j = 0; j < malla->dimens.columnas; j++) { idx = (i * malla->dimens.columnas) + j; aux [idx] = malla->matriz [idx].id; } } /* Reserva memoria en el dispositivo y copia la matriz */ CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) ); CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) ); /* Llama al núcleo para comprobar la matriz */ obtener_dim (&bloques, &hilos, dim_matr_hilos); KERNEL_COMP (err, llenar_vacios_cuda, bloques, hilos, tam * sizeof matriz_d [0], time (NULL), matriz_d, 1, max, malla->dimens ); /* Copia la información de vuelta y libera la memoria en el dispositivo */ CUDA (err, cudaMemcpy (aux, matriz_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) ); /* Copiar directamente un array de Diamante desde el dispositivo da problemas, así que se usa un array de enteros para crear los números aleatorios en paralelo y luego la CPU se encarga de crear los elementos de tipo Diamante */ copiar_matriz (aux, malla); CUDA (err, cudaFree (matriz_d) ); return SUCCESS; } /* -------- */ /** * Busca las mejores jugadas por filas * * @param matriz * Matriz con los valores actuales de los diamantes. * * @param dimens * Estructura con las dimensiones de la matriz. * * @param mat2 * Matriz en la que se va a indicar los valores de las mejores * jugadas por posicion * * @param solh * Matriz que devuleve las mejores jugdas(posicion, movimiento * y valor de cada jugada). 
*/ __global__ void realizar_jugada_horizontal_cuda (int * mat1, Dim dimens, int * mat2,int * solh) { int fila = blockIdx.x * blockDim.x + threadIdx.x; int aux = fila * dimens.columnas; if ((fila >= dimens.filas) || ( (blockIdx.y * blockDim.y + threadIdx.y) != 0) ) { return; } for(int i = 0; i < dimens.filas * 4; i++){ solh[i] = 0; } //Variable para recorrer la matriz int fin = 0; int cont = 0; int mov = 1; int ref = 0; //Vareables no definitivas para guardar el movimiento que se va a realizar int sen = 0; int posY = 0; int posX = 0; //Mejor movimiento horizontal int mh_sen = 0; //Sentido del movimeinto int mh_posY = 0; int mh_posX = 0; for (int i = 0; i < dimens.columnas; i++) { ref = mat1[aux + i]; for(int j = i; j < dimens.columnas; j ++) { if(fin == 0) { if(ref == mat1[aux + j]) { //Mira si la posicion en la que esta es igual a la referencia cont ++; } else if((mov == 1)&&(fila > 0)&&(ref == mat1[aux - dimens.columnas + j])) { //Mira la posicion de arriba --> mover mat1[aux + j] arriba mov = 0; cont ++; sen = 3; posY = fila; posX = j; } else if((mov == 1)&&(fila < dimens.filas - 1)&&(ref == mat1[aux + dimens.columnas + j])) { //Mira la posicion de abajo --> mover mat1[aux + j] abajo mov = 0; cont ++; sen = 1; posY = fila;posX = j; } else if((mov == 1)&&((j + 1) < dimens.columnas)&&(ref == mat1[aux + j + 1])) { //Mirar la posicion de la derecha --> mover mat1[aux + j] derecha mov = 0; cont ++; sen = 0; posY = fila; posX = j; j++; //Pasa a comprobar la siguiente } else { fin = 1; } } } //Mirar en las posiciones de la izquierda if ((mov == 1)&&(i > 0)&&(ref == mat1 [aux - dimens.columnas + i - 1])) { //Mirar la posicion por el lado de la izquierda arriba --> mover mat[aux + i - 1] arriba mov = 0; cont ++; sen = 3; posY = fila; posX = i - 1; } else if ((mov == 1)&&(i > 0)&&(ref == mat1 [aux + dimens.columnas + i -1])) { //Mirar la posicion por el lado de la izquierda abajo --> mover mat[aux + i - 1] abajo mov = 0; cont ++; sen = 1; posY = fila; posX = i - 1; } if(solh[fila * 4] <= cont){ mh_sen = sen; if (mov == 1) sen = 4; mh_posY = posY; mh_posX = posX; } if((solh[fila * 4] == 0)||(solh [fila * 4] < cont)) { solh[fila * 4] = cont; solh[(fila * 4) + 1] = mh_sen; solh[(fila * 4) + 2] = mh_posY; solh[(fila * 4) + 3] = mh_posX; } mat2[aux + i] = cont; //Reinicia valores mov = 1; fin = 0; cont = 0; } } /** * Busca las mejores jugadas por filas * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo alg˙n error al obtener las caracterÌsticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/
int realizar_jugada_horizontal (Malla * malla, int * jugada)
{
	cudaError_t err;
	dim3 bloques, hilos;

	int tam = malla->dimens.columnas * malla->dimens.filas;
	int * matriz_d;
	int * mov_d;
	int * aux = (int *) malloc (tam * sizeof aux [0]);

	/* Vector solh */
	int * vec = (int *) malloc (malla->dimens.filas * sizeof (int) * 4);
	int * vec_d;

	CUDA (err, cudaMalloc ((void **) &vec_d, malla->dimens.filas * sizeof (int) * 4) );

	for (int i = 0; i < malla->dimens.filas * 4; i++)
	{
		vec [i] = 0;
	}

	/* Crea un hilo por fila */
	Dim dim_matr_hilos;
	dim_matr_hilos.filas = malla->dimens.filas;
	dim_matr_hilos.columnas = 1;

	/* Inicializa la matriz auxiliar */
	int idx;
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		for (int j = 0; j < malla->dimens.columnas; j++)
		{
			idx = (i * malla->dimens.columnas) + j;
			aux [idx] = malla->matriz [idx].id;
		}
	}

	/* Reserva memoria en el dispositivo y copia la matriz y el vector de
	jugadas ('vec' ya es un puntero, por lo que no debe pasarse su dirección,
	y hay que copiar los filas * 4 enteros completos) */
	CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) );
	CUDA (err, cudaMalloc ((void **) &mov_d, tam * sizeof mov_d [0]) );
	CUDA (err, cudaMemset (mov_d, NO_COINCIDE, tam * sizeof mov_d [0]) );
	CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) );
	CUDA (err, cudaMemcpy (vec_d, vec, malla->dimens.filas * 4 * sizeof vec [0], cudaMemcpyHostToDevice) );

	/* Llama al núcleo para comprobar la matriz */
	obtener_dim (&bloques, &hilos, dim_matr_hilos);

	KERNEL (err, realizar_jugada_horizontal_cuda, bloques, hilos, matriz_d, malla->dimens, mov_d, vec_d);

	/* Copia la información de vuelta y libera la memoria en el dispositivo */
	CUDA (err, cudaMemcpy (aux, mov_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) );
	CUDA (err, cudaMemcpy (vec, vec_d, malla->dimens.filas * 4 * sizeof vec [0], cudaMemcpyDeviceToHost) );

	/* Se queda con la mejor jugada de todas las filas */
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		if (jugada [0] < vec [i * 4])
		{
			jugada [0] = vec [(i * 4)];
			jugada [1] = vec [(i * 4) + 1];
			jugada [2] = vec [(i * 4) + 2];
			jugada [3] = vec [(i * 4) + 3];
		}
	}

	/* copiar_matriz (aux, malla); */

	CUDA (err, cudaFree (matriz_d) );
	CUDA (err, cudaFree (mov_d) );
	CUDA (err, cudaFree (vec_d) );

	free (aux);
	free (vec);

	return SUCCESS;
}

/**
 * Busca las mejores jugadas por columnas
 *
 * @param matriz
 *		Matriz con los valores actuales de los diamantes.
 *
 * @param dimens
 *		Estructura con las dimensiones de la matriz.
 *
 * @param mat2
 *		Matriz en la que se va a indicar los valores de las mejores
 *	jugadas por posicion
 *
 * @param solv
 *		Matriz que devuelve las mejores jugadas (posicion, movimiento
 *	y valor de cada jugada).
*/ __global__ void realizar_jugada_vertical_cuda (int * mat1, Dim dimens, int * mat2, int * solv) { int columna = blockIdx.y * blockDim.y + threadIdx.y; if ((columna >= dimens.columnas) || ( (blockIdx.x * blockDim.x + threadIdx.x) != 0) ) { return; } for(int i = 0;i < dimens.columnas * 4; i++){ solv[i] = 0; } //Variable para recorrer la matriz int fin = 0; int cont = 0; int mov = 1; int ref = 0; //Vareables no definitivas para guardar el movimiento que se va a realizar int sen = 0; int posY = 0; int posX = 0; //Mejor movimiento horizontal int mh_sen = 0; //Sentido del movimeinto int mh_posY = 0; int mh_posX = 0; for (int i = 0; i < dimens.filas; i++) { ref = mat1[dimens.columnas * i + columna]; for(int j = i; j < dimens.filas; j ++) { if(fin == 0) { if(ref == mat1[dimens.columnas * j + columna]) { //Mira si la posicion en la que esta es igual a la referencia cont ++; } else if((mov == 1)&&(columna > 0)&&(ref == mat1[dimens.columnas * j + columna - 1])) { //Mira la posicion de la izquierda --> mover mat1[dimens.columnas * j + columna - 1] izquierda mov = 0; cont ++; sen = 2; posY = j; posX = columna; } else if((mov == 1)&&(columna < dimens.columnas - 1)&&(ref == mat1[dimens.columnas * j + columna + 1])) { //Mira la posicion de la derecha --> mover mat1[dimens.columnas * j + columna + 1] derecha mov = 0; cont ++; sen = 0; posY = j;posX = columna; } else if((mov == 1)&&((j + 1) < dimens.filas)&&(ref == mat1[dimens.columnas * (j+1) + columna])) { //Mirar la posicion de abajo --> mover mat1[dimens.columnas * j + columna] abajo mov = 0; cont ++; sen = 1; posY = j; posX = columna; j++; //Pasa a comprobar la siguiente } else { fin = 1; } } } //Mirar en las posiciones de arriba if ((mov == 1)&&(columna > 0)&&(i>0)&&(ref == mat1 [dimens.columnas * (i- 1) + (columna - 1)])) { //Mirar la posicion por el lado de arriba izquierda --> mover mat[dimens.columnas * (i- 1) + (columna - 1)] izquierda mov = 0; cont ++; sen = 2; posY = i -1 ; posX =columna; } else if ((mov == 1)&&(i > 0)&&(columna + 1 < dimens.columnas)&&(ref == mat1 [dimens.columnas * (i - 1) + (columna + 1)])) { //Mirar la posicion por el lado de la arriba derecha --> mover mat[dimens.columnas * (i - 1) + (columna + 1)] derecha mov = 0; cont ++; sen = 0; posY = i - 1; posX = columna; } if(solv[columna * 4] <= cont){ mh_sen = sen; if (mov == 1) sen = 4; mh_posY = posY; mh_posX = posX; } if((solv[columna * 4] == 0)||(solv[columna * 4] < cont)) { solv[columna * 4] = cont; solv[(columna * 4) + 1] = mh_sen; solv[(columna * 4) + 2] = mh_posY; solv[(columna * 4) + 3] = mh_posX; } mat2[dimens.columnas * i + columna] = cont; //Reinicias valores mov = 1; fin = 0; cont = 0; } } /** * Busca las mejores jugadas por columnas * * @return * SUCCESS si todo ha salido correctamente. * ERR_CUDA si hubo alg˙n error al obtener las caracterÌsticas del * dispositivo. * ERR_TAM si la matriz especificada sobrepasa las capacidades del * dispositivo. 
*/
int realizar_jugada_vertical (Malla * malla, int * jugada)
{
	cudaError_t err;
	dim3 bloques, hilos;

	int tam = malla->dimens.columnas * malla->dimens.filas;
	int * matriz_d;
	int * mov_d;
	int * aux = (int *) malloc (tam * sizeof aux [0]);

	/* Vector solv */
	int * vec = (int *) malloc (malla->dimens.columnas * sizeof (int) * 4);
	int * vec_d;

	CUDA (err, cudaMalloc ((void **) &vec_d, malla->dimens.columnas * sizeof (int) * 4) );

	for (int i = 0; i < malla->dimens.columnas * 4; i++)
	{
		vec [i] = 0;
	}

	/* Crea un hilo por columna */
	Dim dim_matr_hilos;
	dim_matr_hilos.filas = 1;
	dim_matr_hilos.columnas = malla->dimens.columnas;

	/* Inicializa la matriz auxiliar */
	int idx;
	for (int i = 0; i < malla->dimens.filas; i++)
	{
		for (int j = 0; j < malla->dimens.columnas; j++)
		{
			idx = (i * malla->dimens.columnas) + j;
			aux [idx] = malla->matriz [idx].id;
		}
	}

	/* Reserva memoria en el dispositivo y copia la matriz y el vector de
	jugadas ('vec' ya es un puntero, por lo que no debe pasarse su dirección,
	y hay que copiar los columnas * 4 enteros completos) */
	CUDA (err, cudaMalloc ((void **) &matriz_d, tam * sizeof matriz_d [0]) );
	CUDA (err, cudaMalloc ((void **) &mov_d, tam * sizeof mov_d [0]) );
	CUDA (err, cudaMemset (mov_d, NO_COINCIDE, tam * sizeof mov_d [0]) );
	CUDA (err, cudaMemcpy (matriz_d, aux, tam * sizeof matriz_d [0], cudaMemcpyHostToDevice) );
	CUDA (err, cudaMemcpy (vec_d, vec, malla->dimens.columnas * 4 * sizeof vec [0], cudaMemcpyHostToDevice) );

	/* Llama al núcleo para comprobar la matriz */
	obtener_dim (&bloques, &hilos, dim_matr_hilos);

	KERNEL (err, realizar_jugada_vertical_cuda, bloques, hilos, matriz_d, malla->dimens, mov_d, vec_d);

	/* Copia la información de vuelta y libera la memoria en el dispositivo */
	CUDA (err, cudaMemcpy (aux, mov_d, tam * sizeof aux [0], cudaMemcpyDeviceToHost) );
	CUDA (err, cudaMemcpy (vec, vec_d, malla->dimens.columnas * 4 * sizeof vec [0], cudaMemcpyDeviceToHost) );

	/* Se queda con la mejor jugada de todas las columnas */
	for (int i = 0; i < malla->dimens.columnas; i++)
	{
		if (jugada [0] < vec [i * 4])
		{
			jugada [0] = vec [(i * 4)];
			jugada [1] = vec [(i * 4) + 1];
			jugada [2] = vec [(i * 4) + 2];
			jugada [3] = vec [(i * 4) + 3];
		}
	}

	CUDA (err, cudaFree (matriz_d) );
	CUDA (err, cudaFree (mov_d) );
	CUDA (err, cudaFree (vec_d) );

	free (aux);
	free (vec);

	return SUCCESS;
}

/**
 * Busca la mejor jugada (horizontal o vertical) y la ejecuta.
 *
 * @param malla
 *		Estructura con la información del juego.
 *
 *
 * @return
 *		SUCCESS si todo ha salido correctamente.
 */
int realizar_jugada (Malla * malla)
{
	/* Se inicializan a 0 para que la comparación dentro de
	realizar_jugada_vertical/horizontal parta de un valor conocido */
	int jugada_v [4] = {0, 0, 0, 0};
	int jugada_h [4] = {0, 0, 0, 0};

	int posY;
	int posX;
	int mov;

	realizar_jugada_vertical (malla, jugada_v);
	realizar_jugada_horizontal (malla, jugada_h);

	if (jugada_v [0] > jugada_h [0])
	{
		printf ("Mejor jugada --> Valor: %d Mov: %d PosY: %d PosX: %d\n",
			jugada_v [0], jugada_v [1], jugada_v [2], jugada_v [3]);
		posY = jugada_v [2];
		posX = jugada_v [3];
		mov = jugada_v [1];
	}
	else
	{
		printf ("Mejor jugada --> Valor: %d Mov: %d PosY: %d PosX: %d\n",
			jugada_h [0], jugada_h [1], jugada_h [2], jugada_h [3]);
		posY = jugada_h [2];
		posX = jugada_h [3];
		mov = jugada_h [1];
	}

	mover_diamante (posY, posX, mov, *malla);

	return SUCCESS;
}
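An illustrative, stand-alone sketch of the cuRAND pattern used by llenar_vacios_cuda above: draw a float in (0, 1], scale it to the requested range and truncate, instead of taking a modulo. This is editor-added and not part of the original file; all identifiers are hypothetical, and only the CUDA runtime plus the cuRAND device API are assumed.

#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>

/* One thread per cell: each thread seeds its own generator and draws an
 * integer in [min_v, max_v], the same way llenar_vacios_cuda does when a
 * column has no diamond left to drop down. */
__global__ void fill_random_sketch (unsigned long seed, int *out,
                                    int n, int min_v, int max_v)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n) return;

	curandState state;
	curand_init (seed, idx, 0, &state);

	/* Float in (0, 1], scaled and truncated with __float2int_rz; avoids an
	 * integer modulo, as the original comment explains. */
	float r = curand_uniform (&state);
	r = r * (max_v - min_v + 0.999999f) + min_v;
	out [idx] = __float2int_rz (r);
}

int main (void)
{
	const int n = 16;
	int host_out [n];
	int *out_d;

	cudaMalloc ((void **) &out_d, n * sizeof out_d [0]);
	fill_random_sketch<<<1, n>>> ((unsigned long) time (NULL), out_d, n, 1, 8);
	cudaMemcpy (host_out, out_d, n * sizeof host_out [0], cudaMemcpyDeviceToHost);

	for (int i = 0; i < n; i++) printf ("%d ", host_out [i]);
	printf ("\n");

	cudaFree (out_d);
	return 0;
}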
2a1a27907b3f3e470a06ddd223c8e9057594face.hip
// !!! This is a file automatically generated by hipify!!! #define PETSC_SKIP_SPINLOCK #include <petscconf.h> #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h> PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt i; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (d_nnz) { for (i=0; i<B->rmap->n; i++) { if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]); } } if (o_nnz) { for (i=0; i<B->rmap->n; i++) { if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]); } } if (!B->preallocated) { /* Explicitly create 2 MATSEQAIJCUSPARSE matrices. */ ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatPinToCPU(b->A,B->pinnedtocpu);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatPinToCPU(b->B,B->pinnedtocpu);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); } ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. 
*/ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,xx);CHKERRQ(ierr); ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. */ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,xx);CHKERRQ(ierr); ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. This sequence should only be called for GPU computation. 
*/ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->rmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,a->lvec);CHKERRQ(ierr); ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT_DIAG: cusparseStruct->diagGPUMatFormat = format; break; case MAT_CUSPARSE_MULT_OFFDIAG: cusparseStruct->offdiagGPUMatFormat = format; break; case MAT_CUSPARSE_ALL: cusparseStruct->diagGPUMatFormat = format; cusparseStruct->offdiagGPUMatFormat = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_DIAG, and MAT_CUSPARSE_MULT_ALL are currently supported.",op); } PetscFunctionReturn(0); } PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { MatCUSPARSEStorageFormat format; PetscErrorCode ierr; PetscBool flg; Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; Mat_MPIAIJ *mpiaij; PetscFunctionBegin; mpiaij = (Mat_MPIAIJ*)A->data; ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr); if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) { ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; 
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; hipError_t err; hipsparseStatus_t stat; PetscFunctionBegin; try { ierr = MatCUSPARSEClearHandle(a->A);CHKERRQ(ierr); ierr = MatCUSPARSEClearHandle(a->B);CHKERRQ(ierr); stat = hipsparseDestroy(cusparseStruct->handle);CHKERRCUDA(stat); err = hipStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err); delete cusparseStruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex); } ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; Mat_MPIAIJ *a; Mat_MPIAIJCUSPARSE * cusparseStruct; hipError_t err; hipsparseStatus_t stat; PetscFunctionBegin; ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr); a = (Mat_MPIAIJ*)A->data; a->spptr = new Mat_MPIAIJCUSPARSE; cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR; stat = hipsparseCreate(&(cusparseStruct->handle));CHKERRCUDA(stat); err = hipStreamCreate(&(cusparseStruct->stream));CHKERRCUDA(err); A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE; A->ops->mult = MatMult_MPIAIJCUSPARSE; A->ops->multadd = MatMultAdd_MPIAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE; A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE; A->ops->destroy = MatDestroy_MPIAIJCUSPARSE; ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@ MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. 
For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); if (size > 1) { ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr); } else { ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator, and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators, MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported for communicators controlling multiple processes. It is recommended that you call both of the above preallocation routines for simplicity. Options Database Keys: + -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). Level: beginner .seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M M*/
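As orientation for the implementation above, here is a minimal, hedged usage sketch (editor-added, not an excerpt from PETSc). It assumes a CUDA-enabled PETSc build where MATMPIAIJCUSPARSE is available, uses the convenience constructor MatCreateAIJCUSPARSE directly for brevity, and keeps the preallocation deliberately rough; on more than one MPI rank MatMult dispatches to MatMult_MPIAIJCUSPARSE shown earlier.

static char help[] = "Sketch: assemble a small MATMPIAIJCUSPARSE matrix and apply it.\n";

#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat            A;
  Vec            x, y;
  PetscInt       i, rstart, rend, N = 8;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, help); if (ierr) return ierr;

  /* Rough preallocation: 2 nonzeros per row in the diagonal block, 1 in the
     off-diagonal block (enough for the 1D stencil set below). */
  ierr = MatCreateAIJCUSPARSE(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, N, N, 2, NULL, 1, NULL, &A);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A, &rstart, &rend);CHKERRQ(ierr);
  for (i = rstart; i < rend; i++) {
    ierr = MatSetValue(A, i, i, 2.0, INSERT_VALUES);CHKERRQ(ierr);
    if (i + 1 < N) { ierr = MatSetValue(A, i, i + 1, -1.0, INSERT_VALUES);CHKERRQ(ierr); }
  }
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* MatCreateVecs returns vectors of the matrix' default vector type
     (VECCUDA here), so the product runs on the GPU. */
  ierr = MatCreateVecs(A, &x, &y);CHKERRQ(ierr);
  ierr = VecSet(x, 1.0);CHKERRQ(ierr);
  ierr = MatMult(A, x, y);CHKERRQ(ierr);
  ierr = VecView(y, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}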
2a1a27907b3f3e470a06ddd223c8e9057594face.cu
#define PETSC_SKIP_SPINLOCK #include <petscconf.h> #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h> PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt i; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (d_nnz) { for (i=0; i<B->rmap->n; i++) { if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]); } } if (o_nnz) { for (i=0; i<B->rmap->n; i++) { if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]); } } if (!B->preallocated) { /* Explicitly create 2 MATSEQAIJCUSPARSE matrices. */ ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatPinToCPU(b->A,B->pinnedtocpu);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatPinToCPU(b->B,B->pinnedtocpu);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); } ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. 
*/ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,xx);CHKERRQ(ierr); ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. */ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,xx);CHKERRQ(ierr); ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { /* This multiplication sequence is different sequence than the CPU version. In particular, the diagonal block multiplication kernel is launched in one stream. Then, in a separate stream, the data transfers from DeviceToHost (with MPI messaging in between), then HostToDevice are launched. Once the data transfer stream is synchronized, to ensure messaging is complete, the MatMultAdd kernel is launched in the original (MatMult) stream to protect against race conditions. This sequence should only be called for GPU computation. 
*/ Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->rmap->n,nt); ierr = VecScatterInitializeForGPU(a->Mvctx,a->lvec);CHKERRQ(ierr); ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT_DIAG: cusparseStruct->diagGPUMatFormat = format; break; case MAT_CUSPARSE_MULT_OFFDIAG: cusparseStruct->offdiagGPUMatFormat = format; break; case MAT_CUSPARSE_ALL: cusparseStruct->diagGPUMatFormat = format; cusparseStruct->offdiagGPUMatFormat = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_DIAG, and MAT_CUSPARSE_MULT_ALL are currently supported.",op); } PetscFunctionReturn(0); } PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { MatCUSPARSEStorageFormat format; PetscErrorCode ierr; PetscBool flg; Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; Mat_MPIAIJ *mpiaij; PetscFunctionBegin; mpiaij = (Mat_MPIAIJ*)A->data; ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr); if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) { ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; 
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; cudaError_t err; cusparseStatus_t stat; PetscFunctionBegin; try { ierr = MatCUSPARSEClearHandle(a->A);CHKERRQ(ierr); ierr = MatCUSPARSEClearHandle(a->B);CHKERRQ(ierr); stat = cusparseDestroy(cusparseStruct->handle);CHKERRCUDA(stat); err = cudaStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err); delete cusparseStruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex); } ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; Mat_MPIAIJ *a; Mat_MPIAIJCUSPARSE * cusparseStruct; cudaError_t err; cusparseStatus_t stat; PetscFunctionBegin; ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr); a = (Mat_MPIAIJ*)A->data; a->spptr = new Mat_MPIAIJCUSPARSE; cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR; stat = cusparseCreate(&(cusparseStruct->handle));CHKERRCUDA(stat); err = cudaStreamCreate(&(cusparseStruct->stream));CHKERRCUDA(err); A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE; A->ops->mult = MatMult_MPIAIJCUSPARSE; A->ops->multadd = MatMultAdd_MPIAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE; A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE; A->ops->destroy = MatDestroy_MPIAIJCUSPARSE; ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@ MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. 
For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); if (size > 1) { ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr); } else { ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator, and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators, MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported for communicators controlling multiple processes. It is recommended that you call both of the above preallocation routines for simplicity. Options Database Keys: + -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). Level: beginner .seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M M*/
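To make the storage-format knobs above concrete, the following hedged sketch (editor-added; configure_cusparse_formats is a hypothetical helper name) shows both ways of choosing a format for an existing MATMPIAIJCUSPARSE matrix. It assumes MAT_CUSPARSE_ELL is available in the build, as suggested by the ell/hyb options documented in the manual page above.

#include <petscmat.h>

/* Assumes A has already been created as MATMPIAIJCUSPARSE
   (e.g. via MatCreateAIJCUSPARSE) and not yet assembled. */
PetscErrorCode configure_cusparse_formats(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Programmatic route: CSR for the diagonal block, ELL for the off-diagonal one,
     using the operations handled by MatCUSPARSESetFormat_MPIAIJCUSPARSE above. */
  ierr = MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_CSR);CHKERRQ(ierr);
  ierr = MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT_OFFDIAG, MAT_CUSPARSE_ELL);CHKERRQ(ierr);

  /* Options route: honours -mat_cusparse_storage_format csr|ell|hyb and the
     per-block variants listed in the options database keys above. */
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}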
b2c0e3027b46111f925b4f2ae763ba2da56cc048.hip
// !!! This is a file automatically generated by hipify!!! #include "../NativeBlas.h" #include <rocblas.h> #include <pointercast.h> #include <stdio.h> #include <types/float16.h> hipblasStatus_t convertStatus(int status) { switch (status) { case 0: return HIPBLAS_STATUS_SUCCESS; case 1: return HIPBLAS_STATUS_NOT_INITIALIZED; case 3: return HIPBLAS_STATUS_ALLOC_FAILED; case 7: return HIPBLAS_STATUS_INVALID_VALUE; case 8: return HIPBLAS_STATUS_ARCH_MISMATCH; case 11: return HIPBLAS_STATUS_MAPPING_ERROR; case 13: return HIPBLAS_STATUS_EXECUTION_FAILED; case 14: return HIPBLAS_STATUS_INTERNAL_ERROR; case 15: return HIPBLAS_STATUS_NOT_SUPPORTED; case 16: return CUBLAS_STATUS_LICENSE_ERROR; default: return HIPBLAS_STATUS_SUCCESS; } } hipblasFillMode_t convertUplo(int fillMode) { switch (fillMode) { case 0: return HIPBLAS_FILL_MODE_LOWER; case 1: return HIPBLAS_FILL_MODE_UPPER; default: return HIPBLAS_FILL_MODE_LOWER; } } hipblasDiagType_t convertDiag(int diag) { switch (diag) { case 0: return HIPBLAS_DIAG_NON_UNIT; case 1: return HIPBLAS_DIAG_UNIT; default: return HIPBLAS_DIAG_NON_UNIT; } } hipblasOperation_t convertTranspose(int op) { switch(op) { case 78: return HIPBLAS_OP_N; case 84: return HIPBLAS_OP_T; case 67: return HIPBLAS_OP_C; default: return HIPBLAS_OP_N; } } hipblasPointerMode_t convertPointerMode(int pointerMode) { switch(pointerMode) { case 0: return HIPBLAS_POINTER_MODE_HOST; case 1: return HIPBLAS_POINTER_MODE_DEVICE; default: return HIPBLAS_POINTER_MODE_HOST; }} hipblasSideMode_t convertSideMode(int sideMode) { switch(sideMode) { case 0: return HIPBLAS_SIDE_LEFT; case 1: return HIPBLAS_SIDE_RIGHT; default: return HIPBLAS_SIDE_LEFT; } } void Nd4jBlas::setMaxThreads(int num) { // no-op } /* * ====================================================== * Level 1 BLAS functions * ====================================================== */ /* * ------------------------------------------------------ * DOT * ------------------------------------------------------ */ float Nd4jBlas::sdsdot(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX, float *Y, int incY) { // hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); //nothing found? return 0.0f; } double Nd4jBlas::dsdot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { // hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); //nothing found? 
return 0.0f; } double Nd4jBlas::ddot(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); double resultPointer = 0.0f; hipblasDdot(*handle, N, X, incX, Y, incY, &resultPointer); return resultPointer; } float Nd4jBlas::sdot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; hipblasSdot(*handle, N, X, incX, Y, incY, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * NRM2 * ------------------------------------------------------ */ float Nd4jBlas::snrm2(Nd4jPointer *extraParams, int N, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; hipblasSnrm2(*handle, N, X, incX, &resultPointer); return resultPointer; } double Nd4jBlas::dnrm2(Nd4jPointer *extraParams, int N, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); double resultPointer = 0.0; hipblasDnrm2(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * ASUM * ------------------------------------------------------ */ float Nd4jBlas::sasum(Nd4jPointer *extraParams, int N, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; hipblasSasum(*handle, N, X, incX, &resultPointer); return resultPointer; } double Nd4jBlas::dasum(Nd4jPointer *extraParams, int N, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); double resultPointer = 0.0f; hipblasDasum(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * IAMAX * ------------------------------------------------------ */ int Nd4jBlas::isamax(Nd4jPointer *extraParams, int N, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); int resultPointer = 0; hipblasIsamax(*handle,N,X,incX,&resultPointer); return resultPointer; } int Nd4jBlas::idamax(Nd4jPointer *extraParams, int N, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); int resultPointer = 0; hipblasIdamax(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ====================================================== * Level 1 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * ROT * ------------------------------------------------------ */ void Nd4jBlas::srot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY, float c, float s) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSrot(*handle, N, X, incX, Y, incY, &c, &s); } void Nd4jBlas::drot(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY, double c, double s) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDrot(*handle, N, X, incX, Y, incY, &c, &s); } /* * ------------------------------------------------------ * ROTG * ------------------------------------------------------ */ void Nd4jBlas::srotg(Nd4jPointer *extraParams, float *args) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t 
*>(&extraParams[0]); hipblasSrotg(*handle, &args[0], &args[1], &args[2], &args[3]); } void Nd4jBlas::drotg(Nd4jPointer *extraParams, double *args) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDrotg(*handle, &args[0], &args[1], &args[2], &args[3]); } /* * ------------------------------------------------------ * ROTMG * ------------------------------------------------------ */ void Nd4jBlas::srotmg(Nd4jPointer *extraParams, float *args, float *P) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSrotmg(*handle, &args[0], &args[1], &args[2], &args[3], P); } void Nd4jBlas::drotmg(Nd4jPointer *extraParams, double *args, double *P) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDrotmg(*handle, &args[0], &args[1], &args[2], &args[3], P); } /* * ------------------------------------------------------ * ROTM * ------------------------------------------------------ */ void Nd4jBlas::srotm(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY, float *P) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSrotm(*handle, N, X, incX, Y, incY, P); } void Nd4jBlas::drotm(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY, double *P) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDrotm(*handle, N, X, incX, Y, incY, P); } /* * ------------------------------------------------------ * SWAP * ------------------------------------------------------ */ void Nd4jBlas::sswap(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSswap(*handle, N, X, incX, Y, incY); } void Nd4jBlas::dswap(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDswap(*handle, N, X, incX, Y, incY); } /* * ------------------------------------------------------ * SCAL * ------------------------------------------------------ */ void Nd4jBlas::sscal(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSscal(*handle, N, &alpha, X, incX); } void Nd4jBlas::dscal(Nd4jPointer *extraParams, int N, double alpha, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDscal(*handle, N, &alpha, X, incX); } /* * ------------------------------------------------------ * SCOPY * ------------------------------------------------------ */ void Nd4jBlas::scopy(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasScopy(*handle, N, X, incX, Y, incY); } void Nd4jBlas::dcopy(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDcopy(*handle, N, X, incX, Y, incY); } /* * ------------------------------------------------------ * AXPY * ------------------------------------------------------ */ void Nd4jBlas::saxpy(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSaxpy(*handle, N, &alpha, X, incX, Y, incY); } void 
Nd4jBlas::daxpy(Nd4jPointer *extraParams, int N, double alpha, double *X, int incX, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDaxpy(*handle, N, &alpha, X, incX, Y, incY); } /* * ====================================================== * Level 2 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * GEMV * ------------------------------------------------------ */ void Nd4jBlas::sgemv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSgemv(*handle, convertTranspose(TransA), M, N, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dgemv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDgemv(*handle,convertTranspose(TransA),M,N,&alpha,A,lda,X,incX,&beta,Y,incY); } /* * ------------------------------------------------------ * GBMV * ------------------------------------------------------ */ void Nd4jBlas::sgbmv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, int KL, int KU, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSgbmv(*handle, convertTranspose(TransA), M, N, KL, KU, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dgbmv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, int KL, int KU, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDgbmv(*handle, convertTranspose(TransA), M, N, KL, KU, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SYMV * ------------------------------------------------------ */ void Nd4jBlas::ssymv(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsymv(*handle, convertUplo(Uplo), N, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dsymv(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsymv(*handle, convertUplo(Uplo), N, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SBMV * ------------------------------------------------------ */ void Nd4jBlas::ssbmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, int K, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsbmv(*handle, convertUplo(Uplo), N, K, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dsbmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, int K, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { hipblasHandle_t *handle = 
reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsbmv(*handle, convertUplo(Uplo), N, K, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SPMV * ------------------------------------------------------ */ void Nd4jBlas::sspmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *Ap, float *X, int incX, float beta, float *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSspmv(*handle, convertUplo(Uplo), N, &alpha, Ap, X, incX, &beta, Y, incY); } void Nd4jBlas::dspmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *Ap, double *X, int incX, double beta, double *Y, int incY) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDspmv(*handle, convertUplo(Uplo), N, &alpha, Ap, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * TRMV * ------------------------------------------------------ */ void Nd4jBlas::strmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float alpha, float *A, int lda, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStrmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } void Nd4jBlas::dtrmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double alpha, double *A, int lda, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtrmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } /* * ------------------------------------------------------ * TBMV * ------------------------------------------------------ */ void Nd4jBlas::stbmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, float *A, int lda, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStbmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } void Nd4jBlas::dtbmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, double *A, int lda, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtbmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } /* * ------------------------------------------------------ * TPMV * ------------------------------------------------------ */ void Nd4jBlas::stpmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *Ap, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStpmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } void Nd4jBlas::dtpmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *Ap, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtpmv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } /* * ------------------------------------------------------ * TRSV * ------------------------------------------------------ */ void Nd4jBlas::strsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *A, int lda, float *X, int incX) { 
hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStrsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } void Nd4jBlas::dtrsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *A, int lda, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtrsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } /* * ------------------------------------------------------ * TBSV * ------------------------------------------------------ */ void Nd4jBlas::stbsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, float *A, int lda, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStbsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } void Nd4jBlas::dtbsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, double *A, int lda, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtbsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } /* * ------------------------------------------------------ * TPSV * ------------------------------------------------------ */ void Nd4jBlas::stpsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *Ap, float *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStpsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } void Nd4jBlas::dtpsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *Ap, double *X, int incX) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtpsv(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } /* * ------------------------------------------------------ * GER * ------------------------------------------------------ */ void Nd4jBlas::sger(Nd4jPointer *extraParams, int Order, int M, int N, float alpha, float *X, int incX, float *Y, int incY, float *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSger(*handle, M, N, &alpha, X, incX, Y, incY, A, lda); } void Nd4jBlas::dger(Nd4jPointer *extraParams, int Order, int M, int N, double alpha, double *X, int incX, double *Y, int incY, double *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDger(*handle, M, N, &alpha, X, incX, Y, incY, A, lda); } /* * ------------------------------------------------------ * SYR * ------------------------------------------------------ */ void Nd4jBlas::ssyr(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsyr(*handle, convertUplo(Uplo), N, &alpha, X, incX, A, lda); } void Nd4jBlas::dsyr(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsyr(*handle, convertUplo(Uplo), N, &alpha, X, incX, A, lda); } /* * ------------------------------------------------------ * SPR * 
------------------------------------------------------ */ void Nd4jBlas::sspr(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Ap) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSspr(*handle, convertUplo(Uplo), N, &alpha, X, incX, Ap); } void Nd4jBlas::dspr(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Ap) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDspr(*handle, convertUplo(Uplo), N, &alpha, X, incX, Ap); } /* * ------------------------------------------------------ * SYR2 * ------------------------------------------------------ */ void Nd4jBlas::ssyr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Y, int incY, float *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsyr2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, A, lda); } void Nd4jBlas::dsyr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Y, int incY, double *A, int lda) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsyr2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, A, lda); } /* * ------------------------------------------------------ * SPR2 * ------------------------------------------------------ */ void Nd4jBlas::sspr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Y, int incY, float *Ap) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSspr2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, Ap); } void Nd4jBlas::dspr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Y, int incY, double *Ap) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDspr2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, Ap); } /* * ====================================================== * Level 3 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * GEMM * ------------------------------------------------------ */ void Nd4jBlas::hgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, float alpha, float16 *A, int lda, float16 *B, int ldb, float beta, float16 *C, int ldc) { /* __half *aPointer = reinterpret_cast<__half *>(A); __half *bPointer = reinterpret_cast<__half *>(B); __half *cPointer = reinterpret_cast<__half *>(C); */ hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); /* nd4j::float16 hAlpha = alpha; nd4j::float16 hBeta = beta; hipblasHgemm(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &hAlpha.data, A, lda, B, ldb, &hBeta.data, C, ldc); */ #ifdef CUDA_8 // HIP_R_16F for CUDA 8 // HIPBLAS_DATA_HALF for CUDA 7.5 cublasSgemmEx(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, HIP_R_16F, lda, B, HIP_R_16F, ldb, &beta, C, HIP_R_16F, ldc); #else cublasSgemmEx(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, HIPBLAS_DATA_HALF, lda, B, HIPBLAS_DATA_HALF, ldb, &beta, C, HIPBLAS_DATA_HALF, ldc); #endif } void Nd4jBlas::sgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, float alpha, float *A, int lda, float *B, int ldb, 
float beta, float *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSgemm(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDgemm(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * SYMM * ------------------------------------------------------ */ void Nd4jBlas::ssymm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int M, int N, float alpha, float *A, int lda, float *B, int ldb, float beta, float *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsymm(*handle, convertSideMode(Side), convertUplo(Uplo), M, N, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dsymm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int M, int N, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsymm(*handle, convertSideMode(Side), convertUplo(Uplo), M, N, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * SYRK * ------------------------------------------------------ */ void Nd4jBlas::ssyrk(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, float alpha, float *A, int lda, float beta, float *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsyrk(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, &beta, C, ldc); } void Nd4jBlas::dsyrk(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, double alpha, double *A, int lda, double beta, double *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsyrk(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, &beta, C, ldc); } /* * ------------------------------------------------------ * SYR2K * ------------------------------------------------------ */ void Nd4jBlas::ssyr2k(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, float alpha, float *A, int lda, float *B, int ldb, float beta, float *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasSsyr2k(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dsyr2k(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDsyr2k(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * TRMM * ------------------------------------------------------ */ void Nd4jBlas::strmm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, float alpha, float *A, int lda, float *B, int ldb) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t 
*>(&extraParams[0]); //float *C = reinterpret_cast<float *>(&extraParams[1]); hipblasStrmm(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb, B, ldb); } void Nd4jBlas::dtrmm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, double alpha, double *A, int lda, double *B, int ldb) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtrmm(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb, B, ldb); } /* * ------------------------------------------------------ * TRSM * ------------------------------------------------------ */ void Nd4jBlas::strsm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, float alpha, float *A, int lda, float *B, int ldb) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasStrsm(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb); } void Nd4jBlas::dtrsm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, double alpha, double *A, int lda, double *B, int ldb) { hipblasHandle_t *handle = reinterpret_cast<hipblasHandle_t *>(&extraParams[0]); hipblasDtrsm(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb); }
b2c0e3027b46111f925b4f2ae763ba2da56cc048.cu
#include "../NativeBlas.h" #include <cublas_v2.h> #include <pointercast.h> #include <stdio.h> #include <types/float16.h> cublasStatus_t convertStatus(int status) { switch (status) { case 0: return CUBLAS_STATUS_SUCCESS; case 1: return CUBLAS_STATUS_NOT_INITIALIZED; case 3: return CUBLAS_STATUS_ALLOC_FAILED; case 7: return CUBLAS_STATUS_INVALID_VALUE; case 8: return CUBLAS_STATUS_ARCH_MISMATCH; case 11: return CUBLAS_STATUS_MAPPING_ERROR; case 13: return CUBLAS_STATUS_EXECUTION_FAILED; case 14: return CUBLAS_STATUS_INTERNAL_ERROR; case 15: return CUBLAS_STATUS_NOT_SUPPORTED; case 16: return CUBLAS_STATUS_LICENSE_ERROR; default: return CUBLAS_STATUS_SUCCESS; } } cublasFillMode_t convertUplo(int fillMode) { switch (fillMode) { case 0: return CUBLAS_FILL_MODE_LOWER; case 1: return CUBLAS_FILL_MODE_UPPER; default: return CUBLAS_FILL_MODE_LOWER; } } cublasDiagType_t convertDiag(int diag) { switch (diag) { case 0: return CUBLAS_DIAG_NON_UNIT; case 1: return CUBLAS_DIAG_UNIT; default: return CUBLAS_DIAG_NON_UNIT; } } cublasOperation_t convertTranspose(int op) { switch(op) { case 78: return CUBLAS_OP_N; case 84: return CUBLAS_OP_T; case 67: return CUBLAS_OP_C; default: return CUBLAS_OP_N; } } cublasPointerMode_t convertPointerMode(int pointerMode) { switch(pointerMode) { case 0: return CUBLAS_POINTER_MODE_HOST; case 1: return CUBLAS_POINTER_MODE_DEVICE; default: return CUBLAS_POINTER_MODE_HOST; }} cublasSideMode_t convertSideMode(int sideMode) { switch(sideMode) { case 0: return CUBLAS_SIDE_LEFT; case 1: return CUBLAS_SIDE_RIGHT; default: return CUBLAS_SIDE_LEFT; } } void Nd4jBlas::setMaxThreads(int num) { // no-op } /* * ====================================================== * Level 1 BLAS functions * ====================================================== */ /* * ------------------------------------------------------ * DOT * ------------------------------------------------------ */ float Nd4jBlas::sdsdot(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX, float *Y, int incY) { // cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); //nothing found? return 0.0f; } double Nd4jBlas::dsdot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { // cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); //nothing found? 
return 0.0f; } double Nd4jBlas::ddot(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); double resultPointer = 0.0f; cublasDdot_v2(*handle, N, X, incX, Y, incY, &resultPointer); return resultPointer; } float Nd4jBlas::sdot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; cublasSdot_v2(*handle, N, X, incX, Y, incY, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * NRM2 * ------------------------------------------------------ */ float Nd4jBlas::snrm2(Nd4jPointer *extraParams, int N, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; cublasSnrm2_v2(*handle, N, X, incX, &resultPointer); return resultPointer; } double Nd4jBlas::dnrm2(Nd4jPointer *extraParams, int N, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); double resultPointer = 0.0; cublasDnrm2_v2(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * ASUM * ------------------------------------------------------ */ float Nd4jBlas::sasum(Nd4jPointer *extraParams, int N, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); float resultPointer = 0.0f; cublasSasum_v2(*handle, N, X, incX, &resultPointer); return resultPointer; } double Nd4jBlas::dasum(Nd4jPointer *extraParams, int N, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); double resultPointer = 0.0f; cublasDasum_v2(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ------------------------------------------------------ * IAMAX * ------------------------------------------------------ */ int Nd4jBlas::isamax(Nd4jPointer *extraParams, int N, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); int resultPointer = 0; cublasIsamax_v2(*handle,N,X,incX,&resultPointer); return resultPointer; } int Nd4jBlas::idamax(Nd4jPointer *extraParams, int N, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); int resultPointer = 0; cublasIdamax_v2(*handle, N, X, incX, &resultPointer); return resultPointer; } /* * ====================================================== * Level 1 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * ROT * ------------------------------------------------------ */ void Nd4jBlas::srot(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY, float c, float s) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSrot_v2(*handle, N, X, incX, Y, incY, &c, &s); } void Nd4jBlas::drot(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY, double c, double s) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDrot_v2(*handle, N, X, incX, Y, incY, &c, &s); } /* * ------------------------------------------------------ * ROTG * ------------------------------------------------------ */ void Nd4jBlas::srotg(Nd4jPointer *extraParams, float *args) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t 
*>(&extraParams[0]); cublasSrotg_v2(*handle, &args[0], &args[1], &args[2], &args[3]); } void Nd4jBlas::drotg(Nd4jPointer *extraParams, double *args) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDrotg_v2(*handle, &args[0], &args[1], &args[2], &args[3]); } /* * ------------------------------------------------------ * ROTMG * ------------------------------------------------------ */ void Nd4jBlas::srotmg(Nd4jPointer *extraParams, float *args, float *P) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSrotmg_v2(*handle, &args[0], &args[1], &args[2], &args[3], P); } void Nd4jBlas::drotmg(Nd4jPointer *extraParams, double *args, double *P) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDrotmg_v2(*handle, &args[0], &args[1], &args[2], &args[3], P); } /* * ------------------------------------------------------ * ROTM * ------------------------------------------------------ */ void Nd4jBlas::srotm(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY, float *P) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSrotm_v2(*handle, N, X, incX, Y, incY, P); } void Nd4jBlas::drotm(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY, double *P) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDrotm_v2(*handle, N, X, incX, Y, incY, P); } /* * ------------------------------------------------------ * SWAP * ------------------------------------------------------ */ void Nd4jBlas::sswap(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSswap_v2(*handle, N, X, incX, Y, incY); } void Nd4jBlas::dswap(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDswap_v2(*handle, N, X, incX, Y, incY); } /* * ------------------------------------------------------ * SCAL * ------------------------------------------------------ */ void Nd4jBlas::sscal(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSscal_v2(*handle, N, &alpha, X, incX); } void Nd4jBlas::dscal(Nd4jPointer *extraParams, int N, double alpha, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDscal_v2(*handle, N, &alpha, X, incX); } /* * ------------------------------------------------------ * SCOPY * ------------------------------------------------------ */ void Nd4jBlas::scopy(Nd4jPointer *extraParams, int N, float *X, int incX, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasScopy_v2(*handle, N, X, incX, Y, incY); } void Nd4jBlas::dcopy(Nd4jPointer *extraParams, int N, double *X, int incX, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDcopy_v2(*handle, N, X, incX, Y, incY); } /* * ------------------------------------------------------ * AXPY * ------------------------------------------------------ */ void Nd4jBlas::saxpy(Nd4jPointer *extraParams, int N, float alpha, float *X, int incX, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSaxpy_v2(*handle, N, &alpha, X, incX, Y, incY); } void 
Nd4jBlas::daxpy(Nd4jPointer *extraParams, int N, double alpha, double *X, int incX, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDaxpy_v2(*handle, N, &alpha, X, incX, Y, incY); } /* * ====================================================== * Level 2 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * GEMV * ------------------------------------------------------ */ void Nd4jBlas::sgemv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSgemv_v2(*handle, convertTranspose(TransA), M, N, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dgemv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDgemv_v2(*handle,convertTranspose(TransA),M,N,&alpha,A,lda,X,incX,&beta,Y,incY); } /* * ------------------------------------------------------ * GBMV * ------------------------------------------------------ */ void Nd4jBlas::sgbmv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, int KL, int KU, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSgbmv_v2(*handle, convertTranspose(TransA), M, N, KL, KU, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dgbmv(Nd4jPointer *extraParams, int Order, int TransA, int M, int N, int KL, int KU, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDgbmv_v2(*handle, convertTranspose(TransA), M, N, KL, KU, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SYMV * ------------------------------------------------------ */ void Nd4jBlas::ssymv(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsymv_v2(*handle, convertUplo(Uplo), N, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dsymv(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsymv_v2(*handle, convertUplo(Uplo), N, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SBMV * ------------------------------------------------------ */ void Nd4jBlas::ssbmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, int K, float alpha, float *A, int lda, float *X, int incX, float beta, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsbmv_v2(*handle, convertUplo(Uplo), N, K, &alpha, A, lda, X, incX, &beta, Y, incY); } void Nd4jBlas::dsbmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, int K, double alpha, double *A, int lda, double *X, int incX, double beta, double *Y, int incY) { cublasHandle_t *handle = 
reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsbmv_v2(*handle, convertUplo(Uplo), N, K, &alpha, A, lda, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * SPMV * ------------------------------------------------------ */ void Nd4jBlas::sspmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *Ap, float *X, int incX, float beta, float *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSspmv_v2(*handle, convertUplo(Uplo), N, &alpha, Ap, X, incX, &beta, Y, incY); } void Nd4jBlas::dspmv(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *Ap, double *X, int incX, double beta, double *Y, int incY) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDspmv_v2(*handle, convertUplo(Uplo), N, &alpha, Ap, X, incX, &beta, Y, incY); } /* * ------------------------------------------------------ * TRMV * ------------------------------------------------------ */ void Nd4jBlas::strmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float alpha, float *A, int lda, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStrmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } void Nd4jBlas::dtrmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double alpha, double *A, int lda, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtrmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } /* * ------------------------------------------------------ * TBMV * ------------------------------------------------------ */ void Nd4jBlas::stbmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, float *A, int lda, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStbmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } void Nd4jBlas::dtbmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, double *A, int lda, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtbmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } /* * ------------------------------------------------------ * TPMV * ------------------------------------------------------ */ void Nd4jBlas::stpmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *Ap, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStpmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } void Nd4jBlas::dtpmv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *Ap, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtpmv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } /* * ------------------------------------------------------ * TRSV * ------------------------------------------------------ */ void Nd4jBlas::strsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *A, int lda, float *X, int incX) { 
cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStrsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } void Nd4jBlas::dtrsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *A, int lda, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtrsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, A, lda, X, incX); } /* * ------------------------------------------------------ * TBSV * ------------------------------------------------------ */ void Nd4jBlas::stbsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, float *A, int lda, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStbsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } void Nd4jBlas::dtbsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, int K, double *A, int lda, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtbsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, K, A, lda, X, incX); } /* * ------------------------------------------------------ * TPSV * ------------------------------------------------------ */ void Nd4jBlas::stpsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, float *Ap, float *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStpsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } void Nd4jBlas::dtpsv(Nd4jPointer *extraParams, int Order, int Uplo, int TransA, int Diag, int N, double *Ap, double *X, int incX) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtpsv_v2(*handle, convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), N, Ap, X, incX); } /* * ------------------------------------------------------ * GER * ------------------------------------------------------ */ void Nd4jBlas::sger(Nd4jPointer *extraParams, int Order, int M, int N, float alpha, float *X, int incX, float *Y, int incY, float *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSger_v2(*handle, M, N, &alpha, X, incX, Y, incY, A, lda); } void Nd4jBlas::dger(Nd4jPointer *extraParams, int Order, int M, int N, double alpha, double *X, int incX, double *Y, int incY, double *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDger_v2(*handle, M, N, &alpha, X, incX, Y, incY, A, lda); } /* * ------------------------------------------------------ * SYR * ------------------------------------------------------ */ void Nd4jBlas::ssyr(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsyr_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, A, lda); } void Nd4jBlas::dsyr(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsyr_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, A, lda); } /* * ------------------------------------------------------ * SPR * 
------------------------------------------------------ */ void Nd4jBlas::sspr(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Ap) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSspr(*handle, convertUplo(Uplo), N, &alpha, X, incX, Ap); } void Nd4jBlas::dspr(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Ap) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDspr(*handle, convertUplo(Uplo), N, &alpha, X, incX, Ap); } /* * ------------------------------------------------------ * SYR2 * ------------------------------------------------------ */ void Nd4jBlas::ssyr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Y, int incY, float *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsyr2_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, A, lda); } void Nd4jBlas::dsyr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Y, int incY, double *A, int lda) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsyr2_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, A, lda); } /* * ------------------------------------------------------ * SPR2 * ------------------------------------------------------ */ void Nd4jBlas::sspr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, float alpha, float *X, int incX, float *Y, int incY, float *Ap) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSspr2_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, Ap); } void Nd4jBlas::dspr2(Nd4jPointer *extraParams, int Order, int Uplo, int N, double alpha, double *X, int incX, double *Y, int incY, double *Ap) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDspr2_v2(*handle, convertUplo(Uplo), N, &alpha, X, incX, Y, incY, Ap); } /* * ====================================================== * Level 3 BLAS procedures * ====================================================== */ /* * ------------------------------------------------------ * GEMM * ------------------------------------------------------ */ void Nd4jBlas::hgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, float alpha, float16 *A, int lda, float16 *B, int ldb, float beta, float16 *C, int ldc) { /* __half *aPointer = reinterpret_cast<__half *>(A); __half *bPointer = reinterpret_cast<__half *>(B); __half *cPointer = reinterpret_cast<__half *>(C); */ cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); /* nd4j::float16 hAlpha = alpha; nd4j::float16 hBeta = beta; cublasHgemm(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &hAlpha.data, A, lda, B, ldb, &hBeta.data, C, ldc); */ #ifdef CUDA_8 // CUDA_R_16F for CUDA 8 // CUBLAS_DATA_HALF for CUDA 7.5 cublasSgemmEx(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, CUDA_R_16F, lda, B, CUDA_R_16F, ldb, &beta, C, CUDA_R_16F, ldc); #else cublasSgemmEx(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, CUBLAS_DATA_HALF, lda, B, CUBLAS_DATA_HALF, ldb, &beta, C, CUBLAS_DATA_HALF, ldc); #endif } void Nd4jBlas::sgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, float alpha, float *A, int lda, float *B, int ldb, float 
beta, float *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSgemm_v2(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dgemm(Nd4jPointer *extraParams, int Order, int TransA, int TransB, int M, int N, int K, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDgemm_v2(*handle, convertTranspose(TransA), convertTranspose(TransB), M, N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * SYMM * ------------------------------------------------------ */ void Nd4jBlas::ssymm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int M, int N, float alpha, float *A, int lda, float *B, int ldb, float beta, float *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsymm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), M, N, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dsymm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int M, int N, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsymm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), M, N, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * SYRK * ------------------------------------------------------ */ void Nd4jBlas::ssyrk(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, float alpha, float *A, int lda, float beta, float *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsyrk_v2(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, &beta, C, ldc); } void Nd4jBlas::dsyrk(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, double alpha, double *A, int lda, double beta, double *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsyrk_v2(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, &beta, C, ldc); } /* * ------------------------------------------------------ * SYR2K * ------------------------------------------------------ */ void Nd4jBlas::ssyr2k(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, float alpha, float *A, int lda, float *B, int ldb, float beta, float *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasSsyr2k_v2(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } void Nd4jBlas::dsyr2k(Nd4jPointer *extraParams, int Order, int Uplo, int Trans, int N, int K, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDsyr2k_v2(*handle, convertUplo(Uplo), convertTranspose(Trans), N, K, &alpha, A, lda, B, ldb, &beta, C, ldc); } /* * ------------------------------------------------------ * TRMM * ------------------------------------------------------ */ void Nd4jBlas::strmm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, float alpha, float *A, int lda, float *B, int ldb) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t 
*>(&extraParams[0]); //float *C = reinterpret_cast<float *>(&extraParams[1]); cublasStrmm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb, B, ldb); } void Nd4jBlas::dtrmm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, double alpha, double *A, int lda, double *B, int ldb) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtrmm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb, B, ldb); } /* * ------------------------------------------------------ * TRSM * ------------------------------------------------------ */ void Nd4jBlas::strsm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, float alpha, float *A, int lda, float *B, int ldb) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasStrsm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb); } void Nd4jBlas::dtrsm(Nd4jPointer *extraParams, int Order, int Side, int Uplo, int TransA, int Diag, int M, int N, double alpha, double *A, int lda, double *B, int ldb) { cublasHandle_t *handle = reinterpret_cast<cublasHandle_t *>(&extraParams[0]); cublasDtrsm_v2(*handle, convertSideMode(Side), convertUplo(Uplo), convertTranspose(TransA), convertDiag(Diag), M, N, &alpha, A, lda, B, ldb); }
fa5a375b3e54b1b931e3f268a589b6a4c2f8f54d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in){
	// Todo: Fill in this function
	int idx = threadIdx.x;
	float f = d_in[idx];
	d_out[idx] = f * f * f;
}

int main(int argc, char ** argv) {
	const int ARRAY_SIZE = 64;
	const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

	// generate the input array on the host
	float h_in[ARRAY_SIZE];
	for (int i = 0; i < ARRAY_SIZE; i++) {
		h_in[i] = float(i);
	}
	float h_out[ARRAY_SIZE];

	// declare GPU memory pointers
	float * d_in;
	float * d_out;

	// allocate GPU memory
	hipMalloc((void**) &d_in, ARRAY_BYTES);
	hipMalloc((void**) &d_out, ARRAY_BYTES);

	// transfer the array to the GPU
	hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

	// launch the kernel
	hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);

	// copy back the result array to the CPU
	hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

	// print out the resulting array
	for (int i =0; i < ARRAY_SIZE; i++) {
		printf("%f", h_out[i]);
		printf(((i % 4) != 3) ? "\t" : "\n");
	}

	hipFree(d_in);
	hipFree(d_out);

	return 0;
}
fa5a375b3e54b1b931e3f268a589b6a4c2f8f54d.cu
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in){
	// Todo: Fill in this function
	int idx = threadIdx.x;
	float f = d_in[idx];
	d_out[idx] = f * f * f;
}

int main(int argc, char ** argv) {
	const int ARRAY_SIZE = 64;
	const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

	// generate the input array on the host
	float h_in[ARRAY_SIZE];
	for (int i = 0; i < ARRAY_SIZE; i++) {
		h_in[i] = float(i);
	}
	float h_out[ARRAY_SIZE];

	// declare GPU memory pointers
	float * d_in;
	float * d_out;

	// allocate GPU memory
	cudaMalloc((void**) &d_in, ARRAY_BYTES);
	cudaMalloc((void**) &d_out, ARRAY_BYTES);

	// transfer the array to the GPU
	cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

	// launch the kernel
	cube<<<1, ARRAY_SIZE>>>(d_out, d_in);

	// copy back the result array to the CPU
	cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

	// print out the resulting array
	for (int i =0; i < ARRAY_SIZE; i++) {
		printf("%f", h_out[i]);
		printf(((i % 4) != 3) ? "\t" : "\n");
	}

	cudaFree(d_in);
	cudaFree(d_out);

	return 0;
}
38a9aecf49f1b346d35d2d15132a39a097b6c06a.hip
// !!! This is a file automatically generated by hipify!!!
/*
 *  Copyright 2008-2009 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <algorithm>

#include "ocuutil/float_routines.h"
#include "ocustorage/grid3dboundary.h"
#include "ocuutil/kernel_wrapper.h"
#include "ocustorage/coarray.h"
//#include "ocuequation/eqn_advectiondiffusion3d.h"
#include "eqn_advectiondiffusion3d.h"

namespace ocu {

template<typename T>
Eqn_AdvectionDiffusion3DBase<T>::Eqn_AdvectionDiffusion3DBase()
{
  num_steps = 0;
  _nx = 0;
  _ny = 0;
  _nz = 0;
  _hx = 0;
  _hy = 0;
  _hz = 0;
  _lastdt = 0;
  _max_divergence = 0;
  _cfl_factor = 0;
  _bouyancy = 0;
  _gravity = 0;
  _time_step = TS_ERROR;
  _vertical_direction = DIR_ZPOS;
}

template<typename T>
bool Eqn_AdvectionDiffusion3DBase<T>::set_base_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params)
{
  //SPstatus = cusparseDgtsv(SPhandel,3,1,
  _max_divergence = params.max_divergence;
  _cfl_factor = params.cfl_factor;
  _bouyancy = params.bouyancy;
  _gravity = params.gravity;

  if (!check_float(_max_divergence) || _max_divergence < 0 || _max_divergence > 1) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid max_divergence %f\n", _max_divergence);
    return false;
  }

  if (_cfl_factor < 0) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid cfl_factor %f\n", _cfl_factor);
    return false;
  }

  _time_step = params.time_step;
  if (_time_step != TS_ADAMS_BASHFORD2 && _time_step != TS_FORWARD_EULER) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - unknown timestep type %d\n", _time_step);
    return false;
  }

  _nx = params.nx;
  _ny = params.ny;
  _nz = params.nz;
  _hx = params.hx;
  _hy = params.hy;
  _hz = params.hz;
  _lastdt = 0;

  if (params.vertical_direction != DIR_XPOS && params.vertical_direction != DIR_XNEG &&
      params.vertical_direction != DIR_YPOS && params.vertical_direction != DIR_YNEG &&
      params.vertical_direction != DIR_ZPOS && params.vertical_direction != DIR_ZNEG) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - bad vertical_direction %d\n", params.vertical_direction);
    return false;
  }
  _vertical_direction = params.vertical_direction;

  if (!check_float(_hx) || !check_float(_hy) || !check_float(_hz) || _hx <= 0 || _hy <= 0 || _hz <= 0) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - bad hx, hy, hz (%f, %f, %f)\n", _hx, _hy, _hz);
    return false;
  }

  //_thermalbc = params.temp_bc;

  return true;
}

template<typename T>
Eqn_AdvectionDiffusion3D<T>::Eqn_AdvectionDiffusion3D()
{
}

template<typename T>
bool Eqn_AdvectionDiffusion3D<T>::set_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params)
{
  if (!set_base_parameters(params)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on base parameters\n");
    return false;
  }

  if (!_u.init_congruent(params.init_u) || !_deriv_udt.init_congruent(params.init_u) || !_last_deriv_udt.init_congruent(params.init_u)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing u\n");
    return false;
  }

  if (!_u.copy_all_data(params.init_u)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to u\n");
    return false;
  }

  if (!_v.init_congruent(params.init_v) || !_deriv_vdt.init_congruent(params.init_v) || !_last_deriv_vdt.init_congruent(params.init_v)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing v\n");
    return false;
  }

  if (!_v.copy_all_data(params.init_v)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to v\n");
    return false;
  }

  if (!_w.init_congruent(params.init_w) || !_deriv_wdt.init_congruent(params.init_w) || !_last_deriv_wdt.init_congruent(params.init_w)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing w\n");
    return false;
  }

  if (!_w.copy_all_data(params.init_w)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to w\n");
    return false;
  }

  //if (!_temp.init_congruent(params.init_temp) || !_deriv_tempdt.init_congruent(params.init_temp) || !_last_deriv_tempdt.init_congruent(params.init_temp)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing temp\n");
  //  return false;
  //}

  //if (!_temp.copy_all_data(params.init_temp)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on copying temperature field\n");
  //  return false;
  //}

  //_projection_solver.bc = params.flow_bc;
  _advection_solver.interp_type = params.advection_scheme;
  //_thermal_solver.interp_type = params.advection_scheme;

  //if (!_thermal_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_temp, &_deriv_tempdt)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _thermal_solver initialization\n");
  //  return false;
  // }

  if (!_advection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_deriv_udt, &_deriv_vdt, &_deriv_wdt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _advection_solver initialization\n");
    return false;
  }

  //if (!_projection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _projection_solver initialization\n");
  //  return false;
  //}

  //if (!_thermal_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_temp, &_deriv_tempdt)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _thermal_diffusion initialization\n");
  //  return false;
  // }

  //if (!check_float(params.thermal_diffusion) || params.thermal_diffusion < 0) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid thermal diffusion %f\n", params.thermal_diffusion);
  //  return false;
  //}

  //_thermal_diffusion.coefficient = params.thermal_diffusion;

  if (!_u_diffusion.initialize_storage(this->_nx+1, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_deriv_udt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _u_diffusion initialization\n");
    return false;
  }

  if (!check_float(params.viscosity) || params.viscosity < 0) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid viscosity %f\n", params.viscosity);
    return false;
  }

  _u_diffusion.coefficient = params.viscosity;

  if (!_v_diffusion.initialize_storage(this->_nx, this->_ny+1, this->_nz, this->_hx, this->_hy, this->_hz, &_v, &_deriv_vdt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _v_diffusion initialization\n");
    return false;
  }

  _v_diffusion.coefficient = params.viscosity;

  if (!_w_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz+1, this->_hx, this->_hy, this->_hz, &_w, &_deriv_wdt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _w_diffusion initialization\n");
    return false;
  }

  _w_diffusion.coefficient = params.viscosity;

  if (!apply_3d_mac_boundary_conditions_level1(_u, _v, _w, params.flow_bc, this->_hx, this->_hy, this->_hz)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on enforcing flow boundary conditions\n");
    return false;
  }

  //if (!apply_3d_boundary_conditions_level1(_temp, this->_thermalbc, this->_hx, this->_hy, this->_hz)) {
  //  printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on enforcing thermal boundary conditions\n");
  //  return false;
  //}

  _deriv_udt.clear_zero();
  _deriv_vdt.clear_zero();
  _deriv_wdt.clear_zero();
  //_deriv_tempdt.clear_zero();

  // all grid layouts should match
  if (!_u.check_layout_match(_v) ||
      !_u.check_layout_match(_w) ||
      !_u.check_layout_match(_deriv_udt) ||
      !_u.check_layout_match(_deriv_vdt) ||
      !_u.check_layout_match(_deriv_wdt) ||
      //!_u.check_layout_match(_temp) ||
      //!_u.check_layout_match(_deriv_tempdt) ||
      //!_u.check_layout_match(_last_deriv_tempdt) ||
      !_u.check_layout_match(_last_deriv_udt) ||
      !_u.check_layout_match(_last_deriv_vdt) ||
      !_u.check_layout_match(_last_deriv_wdt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - grid layouts do not all match\n");
    return false;
  }

  return true;
}

template<typename T>
double Eqn_AdvectionDiffusion3D<T>::get_max_stable_timestep() const
{
  T max_u, max_v, max_w;
  _u.reduce_maxabs(max_u);
  _v.reduce_maxabs(max_v);
  _w.reduce_maxabs(max_w);

  double ut = this->hx() / max_u;
  double vt = this->hy() / max_v;
  double wt = this->hz() / max_w;

  if (!check_float(ut)) ut = 1e10;
  if (!check_float(vt)) vt = 1e10;
  if (!check_float(wt)) wt = 1e10;

  double step = this->_cfl_factor * min3(ut, vt, wt);

  double minh = min3(this->hx(), this->hy(), this->hz());

  //if (thermal_diffusion_coefficient() > 0)
  //  step = ::min(step, (minh * minh) / (6 * thermal_diffusion_coefficient()));

  if (viscosity_coefficient() > 0)
    step = ::min(step, (minh * minh) / (6 * viscosity_coefficient()));

  printf("Eqn_AdvectionDiffusion3D<T>::get_max_stable_timestep - return %f\n", step);

  return step;
}

template<typename T>
bool Eqn_AdvectionDiffusion3D<T>::test_tri_solver()
{
  // handlers for different library
  hipsparseStatus_t SPstatus;
  hipsparseHandle_t SPhandle;
  hipsparseMatDescr_t descra;
  //hipError_t cudastat1,cudastat2,cudastat3;
  hipError_t cudastat1;

  // __initialize handlers__
  SPstatus = hipsparseCreate(&SPhandle);
  if(SPstatus != HIPSPARSE_STATUS_SUCCESS)
  {
    printf("CUSPARSE_Lib_Init failed");
    fflush(stdout);
    return EXIT_FAILURE;
  }
  SPstatus = hipsparseCreateMatDescr(&descra);
  if(SPstatus != HIPSPARSE_STATUS_SUCCESS)
  {
    printf("CUSPARSE_Matrix_description_Init failed");
    fflush(stdout);
    return EXIT_FAILURE;
  }

  double *D_low_tri, *D_mid_tri, *D_upp_tri, *D_res;
  double *low_tri = new double[3];
  double *mid_tri = new double[3];
  double *upp_tri = new double[3];
  low_tri[0] = 0; low_tri[1] = 1; low_tri[2] = 1;
  mid_tri[0] = 1; mid_tri[1] = 2; mid_tri[2] = 2;
  upp_tri[0] = 1; upp_tri[1] = 1; upp_tri[2] = 0;
  double *res = new double[3];
  res[0] = 2; res[1] =4; res[2] = 3;

  cudastat1 = hipMalloc((void**)&D_low_tri,3*sizeof(double));
  if(cudastat1!=hipSuccess) { printf(" allocation failed\n");}
  cudastat1 = hipMalloc((void**)&D_mid_tri,3*sizeof(double));
  if(cudastat1!=hipSuccess) { printf(" allocation failed\n");}
  cudastat1 =
hipMalloc((void**)&D_upp_tri,3*sizeof(double)); if(cudastat1!=hipSuccess) { printf(" allocation failed\n");} cudastat1 = hipMalloc((void**)&D_res,3*sizeof(double)); if(cudastat1!=hipSuccess) { printf(" allocation failed\n");} cudastat1 = hipMemcpy(D_low_tri,low_tri,3*sizeof(double),hipMemcpyHostToDevice); if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} cudastat1 = hipMemcpy(D_mid_tri,mid_tri,3*sizeof(double),hipMemcpyHostToDevice); if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} cudastat1 = hipMemcpy(D_upp_tri,upp_tri,3*sizeof(double),hipMemcpyHostToDevice); if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} cudastat1 = hipMemcpy(D_res,res,3*sizeof(double),hipMemcpyHostToDevice); if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} SPstatus = cusparseDgtsv(SPhandle,3,1,D_low_tri,D_mid_tri, D_upp_tri,D_res,3); if(SPstatus!=HIPSPARSE_STATUS_SUCCESS) { printf(" cusparseDgtsv failed !"); fflush(stdout); if(SPstatus == HIPSPARSE_STATUS_NOT_INITIALIZED) { printf(" not init\n"); } if(SPstatus == HIPSPARSE_STATUS_ALLOC_FAILED) { printf(" alloc failed\n"); } if(SPstatus == HIPSPARSE_STATUS_INVALID_VALUE) { printf(" invalid value\n"); } if(SPstatus == HIPSPARSE_STATUS_ARCH_MISMATCH) { printf(" arch mismatch\n"); } if(SPstatus == HIPSPARSE_STATUS_EXECUTION_FAILED) { printf(" execution failed\n"); } if(SPstatus == HIPSPARSE_STATUS_INTERNAL_ERROR) { printf(" internal error\n"); } if(SPstatus == HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED) { printf(" matrix not supported\n"); } throw SPstatus; } //low_tri[0] = 0; low_tri[1] = 0; low_tri[2] = 0; //res[0] = 0; res[1] = 0; res[2] = 0; //cudastat1 = hipMemcpy(low_tri,D_low_tri,3*sizeof(double),hipMemcpyDeviceToHost); //if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} //cudastat1 = hipMemcpy(mid_tri,D_mid_tri,3*sizeof(double),hipMemcpyDeviceToHost); //if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} //cudastat1 = hipMemcpy(upp_tri,D_upp_tri,3*sizeof(double),hipMemcpyDeviceToHost); //if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} cudastat1 = hipMemcpy(res,D_res,3*sizeof(double),hipMemcpyDeviceToHost); if(cudastat1!=hipSuccess) { printf(" mem copy failed\n");} //printf(" low_tri is : %f %f %f\n", low_tri[0],low_tri[1],low_tri[2]); printf(" result is : %f %f %f\n", res[0],res[1],res[2]); delete[] low_tri; delete[] mid_tri; delete[] upp_tri; delete[] res; return 1; } template<typename T> bool Eqn_AdvectionDiffusion3D<T>::advance_one_step(double dt) { this->clear_error(); this->num_steps++; // update dudt check_ok(_advection_solver.solve()); // updates dudt, dvdt, dwdt, overwrites whatever is there if (viscosity_coefficient() > 0) { check_ok(_u_diffusion.solve()); // dudt += \nu \nabla^2 u check_ok(_v_diffusion.solve()); // dvdt += \nu \nabla^2 v check_ok(_w_diffusion.solve()); // dwdt += \nu \nabla^2 w } // eventually this will be replaced with a grid-wide operation. 
//add_thermal_force(); // update dTdt //check_ok(_thermal_solver.solve()); // updates dTdt, overwrites whatever is there //if (thermal_diffusion_coefficient() > 0) { // check_ok(_thermal_diffusion.solve()); // dTdt += k \nabla^2 T //} T ab_coeff = -dt*dt / (2 * this->_lastdt); // advance T //if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { // check_ok(_temp.linear_combination((T)1.0, _temp, (T)(dt - ab_coeff), _deriv_tempdt)); // check_ok(_temp.linear_combination((T)1.0, _temp, (T)ab_coeff, _last_deriv_tempdt)); //} //else { // check_ok(_temp.linear_combination((T)1.0, _temp, (T)dt, _deriv_tempdt)); //} //check_ok(apply_3d_boundary_conditions_level1_nocorners(_temp, this->_thermalbc, this->_hx, this->_hy, this->_hz)); // advance u,v,w if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { check_ok(_u.linear_combination((T)1.0, _u, (T)(dt - ab_coeff), _deriv_udt)); check_ok(_u.linear_combination((T)1.0, _u, (T)ab_coeff, _last_deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)(dt - ab_coeff), _deriv_vdt)); check_ok(_v.linear_combination((T)1.0, _v, (T)ab_coeff, _last_deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)(dt - ab_coeff), _deriv_wdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)ab_coeff, _last_deriv_wdt)); } else { check_ok(_u.linear_combination((T)1.0, _u, (T)dt, _deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)dt, _deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)dt, _deriv_wdt)); } // copy state for AB2 if (this->_time_step == TS_ADAMS_BASHFORD2) { this->_lastdt = dt; //_last_deriv_tempdt.copy_all_data(_deriv_tempdt); _last_deriv_udt.copy_all_data(_deriv_udt); _last_deriv_vdt.copy_all_data(_deriv_vdt); _last_deriv_wdt.copy_all_data(_deriv_wdt); } // enforce incompressibility - this enforces bc's before and after projection //check_ok(_projection_solver.solve(this->_max_divergence)); return !this->any_error(); } template<typename T> Eqn_AdvectionDiffusion3DCo<T>::Eqn_AdvectionDiffusion3DCo(const char *name): //_projection_solver((std::string(name) + std::string("._projection_solver")).c_str()), _u((std::string(name) + std::string("._u")).c_str()), _v((std::string(name) + std::string("._v")).c_str()), _w((std::string(name) + std::string("._w")).c_str()), //_temp((std::string(name) + std::string("._temp")).c_str()), _deriv_udt((std::string(name) + std::string("._deriv_udt")).c_str()), _deriv_vdt((std::string(name) + std::string("._deriv_vdt")).c_str()), _deriv_wdt((std::string(name) + std::string("._deriv_wdt")).c_str()) //_deriv_tempdt((std::string(name) + std::string("._deriv_tempdt")).c_str()) { _u_negx_hdl = -1; _v_negx_hdl = -1; _w_negx_hdl = -1; //_t_negx_hdl = -1; _u_posx_hdl = -1; _v_posx_hdl = -1; _w_posx_hdl = -1; //_t_posx_hdl = -1; } template<typename T> Eqn_AdvectionDiffusion3DCo<T>::~Eqn_AdvectionDiffusion3DCo() { CoArrayManager::barrier_deallocate(_u_negx_hdl); CoArrayManager::barrier_deallocate(_v_negx_hdl); CoArrayManager::barrier_deallocate(_w_negx_hdl); //CoArrayManager::barrier_deallocate(_t_negx_hdl); CoArrayManager::barrier_deallocate(_u_posx_hdl); CoArrayManager::barrier_deallocate(_v_posx_hdl); CoArrayManager::barrier_deallocate(_w_posx_hdl); //CoArrayManager::barrier_deallocate(_t_posx_hdl); } template<typename T> void Eqn_AdvectionDiffusion3DCo<T>::do_halo_exchange_uvw() { CoArrayManager::barrier_exchange(_u_negx_hdl); CoArrayManager::barrier_exchange(_v_negx_hdl); CoArrayManager::barrier_exchange(_w_negx_hdl); CoArrayManager::barrier_exchange(_u_posx_hdl); 
CoArrayManager::barrier_exchange(_v_posx_hdl); CoArrayManager::barrier_exchange(_w_posx_hdl); } /*template<typename T> void Eqn_AdvectionDiffusion3DCo<T>::do_halo_exchange_t() { CoArrayManager::barrier_exchange(_t_negx_hdl); CoArrayManager::barrier_exchange(_t_posx_hdl); }*/ template<typename T> bool Eqn_AdvectionDiffusion3DCo<T>::set_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params) { int tid = ThreadManager::this_image(); int num_images = ThreadManager::num_images(); if (!set_base_parameters(params)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on base parameters\n"); return false; } if (!_u.init_congruent(params.init_u) || !_deriv_udt.init_congruent(params.init_u) || !_last_deriv_udt.init_congruent(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing u\n"); return false; } if (!_u.copy_all_data(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to u\n"); return false; } if (!_v.init_congruent(params.init_v) || !_deriv_vdt.init_congruent(params.init_v) || !_last_deriv_vdt.init_congruent(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing v\n"); return false; } if (!_v.copy_all_data(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to v\n"); return false; } if (!_w.init_congruent(params.init_w) || !_deriv_wdt.init_congruent(params.init_w) || !_last_deriv_wdt.init_congruent(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing w\n"); return false; } if (!_w.copy_all_data(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to w\n"); return false; } /*if (!_temp.init_congruent(params.init_temp) || !_deriv_tempdt.init_congruent(params.init_temp) || !_last_deriv_tempdt.init_congruent(params.init_temp)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing temp\n"); return false; } if (!_temp.copy_all_data(params.init_temp)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on copying temperature field\n"); return false; } */ //_projection_solver.bc = params.flow_bc; _advection_solver.interp_type = params.advection_scheme; //_thermal_solver.interp_type = params.advection_scheme; /*if (!_thermal_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_temp, &_deriv_tempdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _thermal_solver initialization\n"); return false; }*/ if (!_advection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_deriv_udt, &_deriv_vdt, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _advection_solver initialization\n"); return false; } /*if (!_projection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _projection_solver initialization\n"); return false; }*/ /*if (!_thermal_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_temp, &_deriv_tempdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _thermal_diffusion initialization\n"); return false; }*/ /*if (!check_float(params.thermal_diffusion) || params.thermal_diffusion < 0) { 
printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - invalid thermal diffusion %f\n", params.thermal_diffusion); return false; } _thermal_diffusion.coefficient = params.thermal_diffusion; */ if (!_u_diffusion.initialize_storage(this->_nx+1, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_deriv_udt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _u_diffusion initialization\n"); return false; } if (!check_float(params.viscosity) || params.viscosity < 0) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - invalid viscosity %f\n", params.viscosity); return false; } _u_diffusion.coefficient = params.viscosity; if (!_v_diffusion.initialize_storage(this->_nx, this->_ny+1, this->_nz, this->_hx, this->_hy, this->_hz, &_v, &_deriv_vdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _v_diffusion initialization\n"); return false; } _v_diffusion.coefficient = params.viscosity; if (!_w_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz+1, this->_hx, this->_hy, this->_hz, &_w, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _w_diffusion initialization\n"); return false; } _w_diffusion.coefficient = params.viscosity; // figure out who our neighbors are int negx_image = (tid - 1 + num_images) % num_images; int posx_image = (tid + 1) % num_images; if (tid == 0 && params.flow_bc.xneg.type != BC_PERIODIC) negx_image = -1; if (tid == num_images - 1 && params.flow_bc.xpos.type != BC_PERIODIC) posx_image = -1; // set up bc's & transfers here this->_local_bc = params.flow_bc; //this->_local_thermalbc = this->_thermalbc; if (negx_image != -1) { _local_bc.xneg.type = BC_NONE; //_local_thermalbc.xneg.type = BC_NONE; } if (posx_image != -1) { _local_bc.xpos.type = BC_NONE; //_local_thermalbc.xpos.type = BC_NONE; } if (posx_image != -1) { Region3D uto = _u .region(this->_nx, this->_nx+1)()(); Region3D ufrom = _u.co(posx_image)->region(0,1)()(); _u_posx_hdl = CoArrayManager::barrier_allocate(uto, ufrom); if (_u_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _u_posx_hdl\n"); Region3D vto = _v .region(this->_nx)()(); Region3D vfrom = _v.co(posx_image)->region(0)()(); _v_posx_hdl = CoArrayManager::barrier_allocate(vto, vfrom); if (_v_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _v_posx_hdl\n"); Region3D wto = _w .region(this->_nx)()(); Region3D wfrom = _w.co(posx_image)->region(0)()(); _w_posx_hdl = CoArrayManager::barrier_allocate(wto, wfrom); if (_w_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _w_posx_hdl\n"); //Region3D tto = _temp .region(this->_nx)()(); //Region3D tfrom = _temp.co(posx_image)->region(0)()(); //_t_posx_hdl = CoArrayManager::barrier_allocate(tto, tfrom); //if (_t_posx_hdl == -1) // printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _t_posx_hdl\n"); } else { _u_posx_hdl = CoArrayManager::barrier_allocate(); _v_posx_hdl = CoArrayManager::barrier_allocate(); _w_posx_hdl = CoArrayManager::barrier_allocate(); //_t_posx_hdl = CoArrayManager::barrier_allocate(); } if (negx_image != -1) { Region3D uto = _u .region(-1)()(); Region3D ufrom = _u.co(negx_image)->region(this->_nx-1)()(); _u_negx_hdl = CoArrayManager::barrier_allocate(uto, ufrom); if (_u_negx_hdl == -1) printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _u_negx_hdl\n"); Region3D vto = _v 
.region(-1)()(); Region3D vfrom = _v.co(negx_image)->region(this->_nx-1)()(); _v_negx_hdl = CoArrayManager::barrier_allocate(vto, vfrom); if (_v_negx_hdl == -1) printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _v_negx_hdl\n"); Region3D wto = _w .region(-1)()(); Region3D wfrom = _w.co(negx_image)->region(this->_nx-1)()(); _w_negx_hdl = CoArrayManager::barrier_allocate(wto, wfrom); if (_w_negx_hdl == -1) printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _w_negx_hdl\n"); // Region3D tto = _temp .region(-1)()(); // Region3D tfrom = _temp.co(negx_image)->region(this->_nx-1)()(); /*_t_negx_hdl = CoArrayManager::barrier_allocate(tto, tfrom); if (_t_negx_hdl == -1) printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _t_negx_hdl\n"); */ } else { _u_negx_hdl = CoArrayManager::barrier_allocate(); _v_negx_hdl = CoArrayManager::barrier_allocate(); _w_negx_hdl = CoArrayManager::barrier_allocate(); //_t_negx_hdl = CoArrayManager::barrier_allocate(); } do_halo_exchange_uvw(); if (!apply_3d_mac_boundary_conditions_level1(_u, _v, _w, _local_bc, this->_hx, this->_hy, this->_hz)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on enforcing flow boundary conditions\n"); return false; } /*do_halo_exchange_t(); if (!apply_3d_boundary_conditions_level1(_temp, _local_thermalbc, this->_hx, this->_hy, this->_hz)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on enforcing thermal boundary conditions\n"); return false; } */ _deriv_udt.clear_zero(); _deriv_vdt.clear_zero(); _deriv_wdt.clear_zero(); //_deriv_tempdt.clear_zero(); // all grid layouts should match if (!_u.check_layout_match(_v) || !_u.check_layout_match(_w) || !_u.check_layout_match(_deriv_udt) || !_u.check_layout_match(_deriv_vdt) || !_u.check_layout_match(_deriv_wdt) || //!_u.check_layout_match(_temp) || //!_u.check_layout_match(_deriv_tempdt) || //!_u.check_layout_match(_last_deriv_tempdt) || !_u.check_layout_match(_last_deriv_udt) || !_u.check_layout_match(_last_deriv_vdt) || !_u.check_layout_match(_last_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - grid layouts do not all match\n"); return false; } ThreadManager::io_fence(); ThreadManager::barrier(); return true; } template<typename T> double Eqn_AdvectionDiffusion3DCo<T>::get_max_stable_timestep() const { T max_u, max_v, max_w; _u.co_reduce_maxabs(max_u); _v.co_reduce_maxabs(max_v); _w.co_reduce_maxabs(max_w); double ut = this->hx() / max_u; double vt = this->hy() / max_v; double wt = this->hz() / max_w; if (!check_float(ut)) ut = 1e10; if (!check_float(vt)) vt = 1e10; if (!check_float(wt)) wt = 1e10; double step = this->_cfl_factor * min3(ut, vt, wt); double minh = min3(this->hx(), this->hy(), this->hz()); //if (thermal_diffusion_coefficient() > 0) // step = ::min(step, (minh * minh) / (6 * this->thermal_diffusion_coefficient())); if (viscosity_coefficient() > 0) step = ::min(step, (minh * minh) / (6 * this->viscosity_coefficient())); printf("Eqn_AdvectionDiffusion3DCo<T>::get_max_stable_timestep - return %f (%f %f %f)\n", step, ut, vt, wt); return step; } /*template<typename T> void Eqn_AdvectionDiffusion3DCo<T>::add_thermal_force() { // apply thermal force by adding -gkT to dvdt (let g = -1, k = 1, so this is just dvdt += T) //_advection_solver.deriv_vdt.linear_combination((T)1.0, _advection_solver.deriv_vdt, (T)1.0, _thermal_solver.phi); int tnx = this->nz(); int tny = this->ny(); int tnz = this->nx(); int 
threadsInX = 16; int threadsInY = 2; int threadsInZ = 2; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); T direction_mult = this->_vertical_direction & DIR_NEGATIVE_FLAG ? 1 : -1; T *uvw = (this->_vertical_direction & DIR_XAXIS_FLAG) ? &_deriv_udt.at(0,0,0) : (this->_vertical_direction & DIR_YAXIS_FLAG) ? &_deriv_vdt.at(0,0,0) : &_deriv_wdt.at(0,0,0); KernelWrapper wrapper; wrapper.PreKernel(); //Eqn_AdvectionDiffusion3D_add_thermal_force<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(uvw, direction_mult * this->_gravity * this->_bouyancy, &_temp.at(0,0,0), // _temp.xstride(), _temp.ystride(), _temp.stride(this->_vertical_direction), this->nx(), this->ny(), this->nz(), // blocksInY, 1.0f / (float)blocksInY); if (!wrapper.PostKernel("Eqn_AdvectionDiffusion3D_add_thermal_force")) this->add_error(); }*/ template<typename T> bool Eqn_AdvectionDiffusion3DCo<T>::advance_one_step(double dt) { this->clear_error(); this->num_steps++; // update dudt check_ok(_advection_solver.solve()); // updates dudt, dvdt, dwdt, overwrites whatever is there if (viscosity_coefficient() > 0) { check_ok(_u_diffusion.solve()); // dudt += \nu \nabla^2 u check_ok(_v_diffusion.solve()); // dvdt += \nu \nabla^2 v check_ok(_w_diffusion.solve()); // dwdt += \nu \nabla^2 w } // eventually this will be replaced with a grid-wide operation. //add_thermal_force(); // update dTdt /*check_ok(_thermal_solver.solve()); // updates dTdt, overwrites whatever is there if (thermal_diffusion_coefficient() > 0) { check_ok(_thermal_diffusion.solve()); // dTdt += k \nabla^2 T } */ T ab_coeff = -dt*dt / (2 * this->_lastdt); // advance T /*if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { check_ok(_temp.linear_combination((T)1.0, _temp, (T)(dt - ab_coeff), _deriv_tempdt)); check_ok(_temp.linear_combination((T)1.0, _temp, (T)ab_coeff, _last_deriv_tempdt)); } else { check_ok(_temp.linear_combination((T)1.0, _temp, (T)dt, _deriv_tempdt)); } */ //do_halo_exchange_t(); //check_ok(apply_3d_boundary_conditions_level1_nocorners(_temp, this->_local_thermalbc, this->_hx, this->_hy, this->_hz)); // advance u,v,w if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { check_ok(_u.linear_combination((T)1.0, _u, (T)(dt - ab_coeff), _deriv_udt)); check_ok(_u.linear_combination((T)1.0, _u, (T)ab_coeff, _last_deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)(dt - ab_coeff), _deriv_vdt)); check_ok(_v.linear_combination((T)1.0, _v, (T)ab_coeff, _last_deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)(dt - ab_coeff), _deriv_wdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)ab_coeff, _last_deriv_wdt)); } else { check_ok(_u.linear_combination((T)1.0, _u, (T)dt, _deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)dt, _deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)dt, _deriv_wdt)); } // copy state for AB2 if (this->_time_step == TS_ADAMS_BASHFORD2) { this->_lastdt = dt; //_last_deriv_tempdt.copy_all_data(_deriv_tempdt); _last_deriv_udt.copy_all_data(_deriv_udt); _last_deriv_vdt.copy_all_data(_deriv_vdt); _last_deriv_wdt.copy_all_data(_deriv_wdt); } // enforce incompressibility - this enforces bc's before and after projection //check_ok(_projection_solver.solve(this->_max_divergence)); return !this->any_error(); } template class Eqn_AdvectionDiffusion3DBase<float>; template class 
Eqn_AdvectionDiffusion3D<float>; template class Eqn_AdvectionDiffusion3DCo<float>; #ifdef OCU_DOUBLESUPPORT template class Eqn_AdvectionDiffusion3DBase<double>; template class Eqn_AdvectionDiffusion3D<double>; template class Eqn_AdvectionDiffusion3DCo<double>; #endif } // end namespace
38a9aecf49f1b346d35d2d15132a39a097b6c06a.cu
/* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include "ocuutil/float_routines.h" #include "ocustorage/grid3dboundary.h" #include "ocuutil/kernel_wrapper.h" #include "ocustorage/coarray.h" //#include "ocuequation/eqn_advectiondiffusion3d.h" #include "eqn_advectiondiffusion3d.h" namespace ocu { template<typename T> Eqn_AdvectionDiffusion3DBase<T>::Eqn_AdvectionDiffusion3DBase() { num_steps = 0; _nx = 0; _ny = 0; _nz = 0; _hx = 0; _hy = 0; _hz = 0; _lastdt = 0; _max_divergence = 0; _cfl_factor = 0; _bouyancy = 0; _gravity = 0; _time_step = TS_ERROR; _vertical_direction = DIR_ZPOS; } template<typename T> bool Eqn_AdvectionDiffusion3DBase<T>::set_base_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params) { //SPstatus = cusparseDgtsv(SPhandel,3,1, _max_divergence = params.max_divergence; _cfl_factor = params.cfl_factor; _bouyancy = params.bouyancy; _gravity = params.gravity; if (!check_float(_max_divergence) || _max_divergence < 0 || _max_divergence > 1) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid max_divergence %f\n", _max_divergence); return false; } if (_cfl_factor < 0) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid cfl_factor %f\n", _cfl_factor); return false; } _time_step = params.time_step; if (_time_step != TS_ADAMS_BASHFORD2 && _time_step != TS_FORWARD_EULER) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - unknown timestep type %d\n", _time_step); return false; } _nx = params.nx; _ny = params.ny; _nz = params.nz; _hx = params.hx; _hy = params.hy; _hz = params.hz; _lastdt = 0; if (params.vertical_direction != DIR_XPOS && params.vertical_direction != DIR_XNEG && params.vertical_direction != DIR_YPOS && params.vertical_direction != DIR_YNEG && params.vertical_direction != DIR_ZPOS && params.vertical_direction != DIR_ZNEG) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - bad vertical_direction %d\n", params.vertical_direction); return false; } _vertical_direction = params.vertical_direction; if (!check_float(_hx) || !check_float(_hy) || !check_float(_hz) || _hx <= 0 || _hy <= 0 || _hz <= 0) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - bad hx, hy, hz (%f, %f, %f)\n", _hx, _hy, _hz); return false; } //_thermalbc = params.temp_bc; return true; } template<typename T> Eqn_AdvectionDiffusion3D<T>::Eqn_AdvectionDiffusion3D() { } template<typename T> bool Eqn_AdvectionDiffusion3D<T>::set_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params) { if (!set_base_parameters(params)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on base parameters\n"); return false; } if (!_u.init_congruent(params.init_u) || !_deriv_udt.init_congruent(params.init_u) || !_last_deriv_udt.init_congruent(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing u\n"); return false; } if (!_u.copy_all_data(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to u\n"); return 
false; } if (!_v.init_congruent(params.init_v) || !_deriv_vdt.init_congruent(params.init_v) || !_last_deriv_vdt.init_congruent(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing v\n"); return false; } if (!_v.copy_all_data(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to v\n"); return false; } if (!_w.init_congruent(params.init_w) || !_deriv_wdt.init_congruent(params.init_w) || !_last_deriv_wdt.init_congruent(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing w\n"); return false; } if (!_w.copy_all_data(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed copying to w\n"); return false; } //if (!_temp.init_congruent(params.init_temp) || !_deriv_tempdt.init_congruent(params.init_temp) || !_last_deriv_tempdt.init_congruent(params.init_temp)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on initializing temp\n"); // return false; //} //if (!_temp.copy_all_data(params.init_temp)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on copying temperature field\n"); // return false; //} //_projection_solver.bc = params.flow_bc; _advection_solver.interp_type = params.advection_scheme; //_thermal_solver.interp_type = params.advection_scheme; //if (!_thermal_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_temp, &_deriv_tempdt)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _thermal_solver initialization\n"); // return false; // } if (!_advection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_deriv_udt, &_deriv_vdt, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _advection_solver initialization\n"); return false; } //if (!_projection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _projection_solver initialization\n"); // return false; //} //if (!_thermal_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_temp, &_deriv_tempdt)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _thermal_diffusion initialization\n"); // return false; // } //if (!check_float(params.thermal_diffusion) || params.thermal_diffusion < 0) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid thermal diffusion %f\n", params.thermal_diffusion); // return false; //} //_thermal_diffusion.coefficient = params.thermal_diffusion; if (!_u_diffusion.initialize_storage(this->_nx+1, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_deriv_udt)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _u_diffusion initialization\n"); return false; } if (!check_float(params.viscosity) || params.viscosity < 0) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - invalid viscosity %f\n", params.viscosity); return false; } _u_diffusion.coefficient = params.viscosity; if (!_v_diffusion.initialize_storage(this->_nx, this->_ny+1, this->_nz, this->_hx, this->_hy, this->_hz, &_v, &_deriv_vdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _v_diffusion initialization\n"); return false; } _v_diffusion.coefficient = params.viscosity; if 
(!_w_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz+1, this->_hx, this->_hy, this->_hz, &_w, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on _w_diffusion initialization\n"); return false; } _w_diffusion.coefficient = params.viscosity; if (!apply_3d_mac_boundary_conditions_level1(_u, _v, _w, params.flow_bc, this->_hx, this->_hy, this->_hz)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on enforcing flow boundary conditions\n"); return false; } //if (!apply_3d_boundary_conditions_level1(_temp, this->_thermalbc, this->_hx, this->_hy, this->_hz)) { // printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - failed on enforcing thermal boundary conditions\n"); // return false; //} _deriv_udt.clear_zero(); _deriv_vdt.clear_zero(); _deriv_wdt.clear_zero(); //_deriv_tempdt.clear_zero(); // all grid layouts should match if (!_u.check_layout_match(_v) || !_u.check_layout_match(_w) || !_u.check_layout_match(_deriv_udt) || !_u.check_layout_match(_deriv_vdt) || !_u.check_layout_match(_deriv_wdt) || //!_u.check_layout_match(_temp) || //!_u.check_layout_match(_deriv_tempdt) || //!_u.check_layout_match(_last_deriv_tempdt) || !_u.check_layout_match(_last_deriv_udt) || !_u.check_layout_match(_last_deriv_vdt) || !_u.check_layout_match(_last_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3D::set_parameters - grid layouts do not all match\n"); return false; } return true; } template<typename T> double Eqn_AdvectionDiffusion3D<T>::get_max_stable_timestep() const { T max_u, max_v, max_w; _u.reduce_maxabs(max_u); _v.reduce_maxabs(max_v); _w.reduce_maxabs(max_w); double ut = this->hx() / max_u; double vt = this->hy() / max_v; double wt = this->hz() / max_w; if (!check_float(ut)) ut = 1e10; if (!check_float(vt)) vt = 1e10; if (!check_float(wt)) wt = 1e10; double step = this->_cfl_factor * min3(ut, vt, wt); double minh = min3(this->hx(), this->hy(), this->hz()); //if (thermal_diffusion_coefficient() > 0) // step = std::min(step, (minh * minh) / (6 * thermal_diffusion_coefficient())); if (viscosity_coefficient() > 0) step = std::min(step, (minh * minh) / (6 * viscosity_coefficient())); printf("Eqn_AdvectionDiffusion3D<T>::get_max_stable_timestep - return %f\n", step); return step; } template<typename T> bool Eqn_AdvectionDiffusion3D<T>::test_tri_solver() { // handlers for different library cusparseStatus_t SPstatus; cusparseHandle_t SPhandle; cusparseMatDescr_t descra; //cudaError_t cudastat1,cudastat2,cudastat3; cudaError_t cudastat1; // __initialize handlers__ SPstatus = cusparseCreate(&SPhandle); if(SPstatus != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE_Lib_Init failed"); fflush(stdout); return EXIT_FAILURE; } SPstatus = cusparseCreateMatDescr(&descra); if(SPstatus != CUSPARSE_STATUS_SUCCESS) { printf("CUSPARSE_Matrix_description_Init failed"); fflush(stdout); return EXIT_FAILURE; } double *D_low_tri, *D_mid_tri, *D_upp_tri, *D_res; double *low_tri = new double[3]; double *mid_tri = new double[3]; double *upp_tri = new double[3]; low_tri[0] = 0; low_tri[1] = 1; low_tri[2] = 1; mid_tri[0] = 1; mid_tri[1] = 2; mid_tri[2] = 2; upp_tri[0] = 1; upp_tri[1] = 1; upp_tri[2] = 0; double *res = new double[3]; res[0] = 2; res[1] =4; res[2] = 3; cudastat1 = cudaMalloc((void**)&D_low_tri,3*sizeof(double)); if(cudastat1!=cudaSuccess) { printf(" allocation failed\n");} cudastat1 = cudaMalloc((void**)&D_mid_tri,3*sizeof(double)); if(cudastat1!=cudaSuccess) { printf(" allocation failed\n");} cudastat1 = cudaMalloc((void**)&D_upp_tri,3*sizeof(double)); 
if(cudastat1!=cudaSuccess) { printf(" allocation failed\n");} cudastat1 = cudaMalloc((void**)&D_res,3*sizeof(double)); if(cudastat1!=cudaSuccess) { printf(" allocation failed\n");} cudastat1 = cudaMemcpy(D_low_tri,low_tri,3*sizeof(double),cudaMemcpyHostToDevice); if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} cudastat1 = cudaMemcpy(D_mid_tri,mid_tri,3*sizeof(double),cudaMemcpyHostToDevice); if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} cudastat1 = cudaMemcpy(D_upp_tri,upp_tri,3*sizeof(double),cudaMemcpyHostToDevice); if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} cudastat1 = cudaMemcpy(D_res,res,3*sizeof(double),cudaMemcpyHostToDevice); if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} SPstatus = cusparseDgtsv(SPhandle,3,1,D_low_tri,D_mid_tri, D_upp_tri,D_res,3); if(SPstatus!=CUSPARSE_STATUS_SUCCESS) { printf(" cusparseDgtsv failed !"); fflush(stdout); if(SPstatus == CUSPARSE_STATUS_NOT_INITIALIZED) { printf(" not init\n"); } if(SPstatus == CUSPARSE_STATUS_ALLOC_FAILED) { printf(" alloc failed\n"); } if(SPstatus == CUSPARSE_STATUS_INVALID_VALUE) { printf(" invalid value\n"); } if(SPstatus == CUSPARSE_STATUS_ARCH_MISMATCH) { printf(" arch mismatch\n"); } if(SPstatus == CUSPARSE_STATUS_EXECUTION_FAILED) { printf(" execution failed\n"); } if(SPstatus == CUSPARSE_STATUS_INTERNAL_ERROR) { printf(" internal error\n"); } if(SPstatus == CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED) { printf(" matrix not supported\n"); } throw SPstatus; } //low_tri[0] = 0; low_tri[1] = 0; low_tri[2] = 0; //res[0] = 0; res[1] = 0; res[2] = 0; //cudastat1 = cudaMemcpy(low_tri,D_low_tri,3*sizeof(double),cudaMemcpyDeviceToHost); //if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} //cudastat1 = cudaMemcpy(mid_tri,D_mid_tri,3*sizeof(double),cudaMemcpyDeviceToHost); //if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} //cudastat1 = cudaMemcpy(upp_tri,D_upp_tri,3*sizeof(double),cudaMemcpyDeviceToHost); //if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} cudastat1 = cudaMemcpy(res,D_res,3*sizeof(double),cudaMemcpyDeviceToHost); if(cudastat1!=cudaSuccess) { printf(" mem copy failed\n");} //printf(" low_tri is : %f %f %f\n", low_tri[0],low_tri[1],low_tri[2]); printf(" result is : %f %f %f\n", res[0],res[1],res[2]); delete[] low_tri; delete[] mid_tri; delete[] upp_tri; delete[] res; return 1; } template<typename T> bool Eqn_AdvectionDiffusion3D<T>::advance_one_step(double dt) { this->clear_error(); this->num_steps++; // update dudt check_ok(_advection_solver.solve()); // updates dudt, dvdt, dwdt, overwrites whatever is there if (viscosity_coefficient() > 0) { check_ok(_u_diffusion.solve()); // dudt += \nu \nabla^2 u check_ok(_v_diffusion.solve()); // dvdt += \nu \nabla^2 v check_ok(_w_diffusion.solve()); // dwdt += \nu \nabla^2 w } // eventually this will be replaced with a grid-wide operation. 
//add_thermal_force(); // update dTdt //check_ok(_thermal_solver.solve()); // updates dTdt, overwrites whatever is there //if (thermal_diffusion_coefficient() > 0) { // check_ok(_thermal_diffusion.solve()); // dTdt += k \nabla^2 T //} T ab_coeff = -dt*dt / (2 * this->_lastdt); // advance T //if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { // check_ok(_temp.linear_combination((T)1.0, _temp, (T)(dt - ab_coeff), _deriv_tempdt)); // check_ok(_temp.linear_combination((T)1.0, _temp, (T)ab_coeff, _last_deriv_tempdt)); //} //else { // check_ok(_temp.linear_combination((T)1.0, _temp, (T)dt, _deriv_tempdt)); //} //check_ok(apply_3d_boundary_conditions_level1_nocorners(_temp, this->_thermalbc, this->_hx, this->_hy, this->_hz)); // advance u,v,w if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) { check_ok(_u.linear_combination((T)1.0, _u, (T)(dt - ab_coeff), _deriv_udt)); check_ok(_u.linear_combination((T)1.0, _u, (T)ab_coeff, _last_deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)(dt - ab_coeff), _deriv_vdt)); check_ok(_v.linear_combination((T)1.0, _v, (T)ab_coeff, _last_deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)(dt - ab_coeff), _deriv_wdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)ab_coeff, _last_deriv_wdt)); } else { check_ok(_u.linear_combination((T)1.0, _u, (T)dt, _deriv_udt)); check_ok(_v.linear_combination((T)1.0, _v, (T)dt, _deriv_vdt)); check_ok(_w.linear_combination((T)1.0, _w, (T)dt, _deriv_wdt)); } // copy state for AB2 if (this->_time_step == TS_ADAMS_BASHFORD2) { this->_lastdt = dt; //_last_deriv_tempdt.copy_all_data(_deriv_tempdt); _last_deriv_udt.copy_all_data(_deriv_udt); _last_deriv_vdt.copy_all_data(_deriv_vdt); _last_deriv_wdt.copy_all_data(_deriv_wdt); } // enforce incompressibility - this enforces bc's before and after projection //check_ok(_projection_solver.solve(this->_max_divergence)); return !this->any_error(); } template<typename T> Eqn_AdvectionDiffusion3DCo<T>::Eqn_AdvectionDiffusion3DCo(const char *name): //_projection_solver((std::string(name) + std::string("._projection_solver")).c_str()), _u((std::string(name) + std::string("._u")).c_str()), _v((std::string(name) + std::string("._v")).c_str()), _w((std::string(name) + std::string("._w")).c_str()), //_temp((std::string(name) + std::string("._temp")).c_str()), _deriv_udt((std::string(name) + std::string("._deriv_udt")).c_str()), _deriv_vdt((std::string(name) + std::string("._deriv_vdt")).c_str()), _deriv_wdt((std::string(name) + std::string("._deriv_wdt")).c_str()) //_deriv_tempdt((std::string(name) + std::string("._deriv_tempdt")).c_str()) { _u_negx_hdl = -1; _v_negx_hdl = -1; _w_negx_hdl = -1; //_t_negx_hdl = -1; _u_posx_hdl = -1; _v_posx_hdl = -1; _w_posx_hdl = -1; //_t_posx_hdl = -1; } template<typename T> Eqn_AdvectionDiffusion3DCo<T>::~Eqn_AdvectionDiffusion3DCo() { CoArrayManager::barrier_deallocate(_u_negx_hdl); CoArrayManager::barrier_deallocate(_v_negx_hdl); CoArrayManager::barrier_deallocate(_w_negx_hdl); //CoArrayManager::barrier_deallocate(_t_negx_hdl); CoArrayManager::barrier_deallocate(_u_posx_hdl); CoArrayManager::barrier_deallocate(_v_posx_hdl); CoArrayManager::barrier_deallocate(_w_posx_hdl); //CoArrayManager::barrier_deallocate(_t_posx_hdl); } template<typename T> void Eqn_AdvectionDiffusion3DCo<T>::do_halo_exchange_uvw() { CoArrayManager::barrier_exchange(_u_negx_hdl); CoArrayManager::barrier_exchange(_v_negx_hdl); CoArrayManager::barrier_exchange(_w_negx_hdl); CoArrayManager::barrier_exchange(_u_posx_hdl); 
CoArrayManager::barrier_exchange(_v_posx_hdl); CoArrayManager::barrier_exchange(_w_posx_hdl); } /*template<typename T> void Eqn_AdvectionDiffusion3DCo<T>::do_halo_exchange_t() { CoArrayManager::barrier_exchange(_t_negx_hdl); CoArrayManager::barrier_exchange(_t_posx_hdl); }*/ template<typename T> bool Eqn_AdvectionDiffusion3DCo<T>::set_parameters(const Eqn_AdvectionDiffusion3DParams<T> &params) { int tid = ThreadManager::this_image(); int num_images = ThreadManager::num_images(); if (!set_base_parameters(params)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on base parameters\n"); return false; } if (!_u.init_congruent(params.init_u) || !_deriv_udt.init_congruent(params.init_u) || !_last_deriv_udt.init_congruent(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing u\n"); return false; } if (!_u.copy_all_data(params.init_u)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to u\n"); return false; } if (!_v.init_congruent(params.init_v) || !_deriv_vdt.init_congruent(params.init_v) || !_last_deriv_vdt.init_congruent(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing v\n"); return false; } if (!_v.copy_all_data(params.init_v)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to v\n"); return false; } if (!_w.init_congruent(params.init_w) || !_deriv_wdt.init_congruent(params.init_w) || !_last_deriv_wdt.init_congruent(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing w\n"); return false; } if (!_w.copy_all_data(params.init_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed copying to w\n"); return false; } /*if (!_temp.init_congruent(params.init_temp) || !_deriv_tempdt.init_congruent(params.init_temp) || !_last_deriv_tempdt.init_congruent(params.init_temp)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on initializing temp\n"); return false; } if (!_temp.copy_all_data(params.init_temp)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on copying temperature field\n"); return false; } */ //_projection_solver.bc = params.flow_bc; _advection_solver.interp_type = params.advection_scheme; //_thermal_solver.interp_type = params.advection_scheme; /*if (!_thermal_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_temp, &_deriv_tempdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _thermal_solver initialization\n"); return false; }*/ if (!_advection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w, &_deriv_udt, &_deriv_vdt, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _advection_solver initialization\n"); return false; } /*if (!_projection_solver.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_v, &_w)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _projection_solver initialization\n"); return false; }*/ /*if (!_thermal_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_temp, &_deriv_tempdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _thermal_diffusion initialization\n"); return false; }*/ /*if (!check_float(params.thermal_diffusion) || params.thermal_diffusion < 0) { 
printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - invalid thermal diffusion %f\n", params.thermal_diffusion); return false; } _thermal_diffusion.coefficient = params.thermal_diffusion; */ if (!_u_diffusion.initialize_storage(this->_nx+1, this->_ny, this->_nz, this->_hx, this->_hy, this->_hz, &_u, &_deriv_udt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _u_diffusion initialization\n"); return false; } if (!check_float(params.viscosity) || params.viscosity < 0) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - invalid viscosity %f\n", params.viscosity); return false; } _u_diffusion.coefficient = params.viscosity; if (!_v_diffusion.initialize_storage(this->_nx, this->_ny+1, this->_nz, this->_hx, this->_hy, this->_hz, &_v, &_deriv_vdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _v_diffusion initialization\n"); return false; } _v_diffusion.coefficient = params.viscosity; if (!_w_diffusion.initialize_storage(this->_nx, this->_ny, this->_nz+1, this->_hx, this->_hy, this->_hz, &_w, &_deriv_wdt)) { printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on _w_diffusion initialization\n"); return false; } _w_diffusion.coefficient = params.viscosity; // figure out who our neighbors are int negx_image = (tid - 1 + num_images) % num_images; int posx_image = (tid + 1) % num_images; if (tid == 0 && params.flow_bc.xneg.type != BC_PERIODIC) negx_image = -1; if (tid == num_images - 1 && params.flow_bc.xpos.type != BC_PERIODIC) posx_image = -1; // set up bc's & transfers here this->_local_bc = params.flow_bc; //this->_local_thermalbc = this->_thermalbc; if (negx_image != -1) { _local_bc.xneg.type = BC_NONE; //_local_thermalbc.xneg.type = BC_NONE; } if (posx_image != -1) { _local_bc.xpos.type = BC_NONE; //_local_thermalbc.xpos.type = BC_NONE; } if (posx_image != -1) { Region3D uto = _u .region(this->_nx, this->_nx+1)()(); Region3D ufrom = _u.co(posx_image)->region(0,1)()(); _u_posx_hdl = CoArrayManager::barrier_allocate(uto, ufrom); if (_u_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _u_posx_hdl\n"); Region3D vto = _v .region(this->_nx)()(); Region3D vfrom = _v.co(posx_image)->region(0)()(); _v_posx_hdl = CoArrayManager::barrier_allocate(vto, vfrom); if (_v_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _v_posx_hdl\n"); Region3D wto = _w .region(this->_nx)()(); Region3D wfrom = _w.co(posx_image)->region(0)()(); _w_posx_hdl = CoArrayManager::barrier_allocate(wto, wfrom); if (_w_posx_hdl == -1) printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _w_posx_hdl\n"); //Region3D tto = _temp .region(this->_nx)()(); //Region3D tfrom = _temp.co(posx_image)->region(0)()(); //_t_posx_hdl = CoArrayManager::barrier_allocate(tto, tfrom); //if (_t_posx_hdl == -1) // printf("[ERROR] Eqn_AdvectionDiffusion3DCo::initialize_storage - failed to allocate _t_posx_hdl\n"); } else { _u_posx_hdl = CoArrayManager::barrier_allocate(); _v_posx_hdl = CoArrayManager::barrier_allocate(); _w_posx_hdl = CoArrayManager::barrier_allocate(); //_t_posx_hdl = CoArrayManager::barrier_allocate(); } if (negx_image != -1) { Region3D uto = _u .region(-1)()(); Region3D ufrom = _u.co(negx_image)->region(this->_nx-1)()(); _u_negx_hdl = CoArrayManager::barrier_allocate(uto, ufrom); if (_u_negx_hdl == -1) printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _u_negx_hdl\n"); Region3D vto = _v 
.region(-1)()();
    Region3D vfrom = _v.co(negx_image)->region(this->_nx-1)()();
    _v_negx_hdl = CoArrayManager::barrier_allocate(vto, vfrom);
    if (_v_negx_hdl == -1)
      printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _v_negx_hdl\n");

    Region3D wto = _w .region(-1)()();
    Region3D wfrom = _w.co(negx_image)->region(this->_nx-1)()();
    _w_negx_hdl = CoArrayManager::barrier_allocate(wto, wfrom);
    if (_w_negx_hdl == -1)
      printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _w_negx_hdl\n");

    // Region3D tto = _temp .region(-1)()();
    // Region3D tfrom = _temp.co(negx_image)->region(this->_nx-1)()();
    /*_t_negx_hdl = CoArrayManager::barrier_allocate(tto, tfrom);
    if (_t_negx_hdl == -1)
      printf("[ERROR] Sol_MultigridPressure3DDevice::initialize_storage - failed to allocate _t_negx_hdl\n");
    */
  }
  else {
    _u_negx_hdl = CoArrayManager::barrier_allocate();
    _v_negx_hdl = CoArrayManager::barrier_allocate();
    _w_negx_hdl = CoArrayManager::barrier_allocate();
    //_t_negx_hdl = CoArrayManager::barrier_allocate();
  }

  do_halo_exchange_uvw();
  if (!apply_3d_mac_boundary_conditions_level1(_u, _v, _w, _local_bc, this->_hx, this->_hy, this->_hz)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on enforcing flow boundary conditions\n");
    return false;
  }

  /*do_halo_exchange_t();
  if (!apply_3d_boundary_conditions_level1(_temp, _local_thermalbc, this->_hx, this->_hy, this->_hz)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - failed on enforcing thermal boundary conditions\n");
    return false;
  }
  */

  _deriv_udt.clear_zero();
  _deriv_vdt.clear_zero();
  _deriv_wdt.clear_zero();
  //_deriv_tempdt.clear_zero();

  // all grid layouts should match
  if (!_u.check_layout_match(_v) ||
      !_u.check_layout_match(_w) ||
      !_u.check_layout_match(_deriv_udt) ||
      !_u.check_layout_match(_deriv_vdt) ||
      !_u.check_layout_match(_deriv_wdt) ||
      //!_u.check_layout_match(_temp) ||
      //!_u.check_layout_match(_deriv_tempdt) ||
      //!_u.check_layout_match(_last_deriv_tempdt) ||
      !_u.check_layout_match(_last_deriv_udt) ||
      !_u.check_layout_match(_last_deriv_vdt) ||
      !_u.check_layout_match(_last_deriv_wdt)) {
    printf("[ERROR] Eqn_AdvectionDiffusion3DCo::set_parameters - grid layouts do not all match\n");
    return false;
  }

  ThreadManager::io_fence();
  ThreadManager::barrier();

  return true;
}

template<typename T>
double Eqn_AdvectionDiffusion3DCo<T>::get_max_stable_timestep() const
{
  T max_u, max_v, max_w;
  _u.co_reduce_maxabs(max_u);
  _v.co_reduce_maxabs(max_v);
  _w.co_reduce_maxabs(max_w);

  double ut = this->hx() / max_u;
  double vt = this->hy() / max_v;
  double wt = this->hz() / max_w;

  if (!check_float(ut)) ut = 1e10;
  if (!check_float(vt)) vt = 1e10;
  if (!check_float(wt)) wt = 1e10;

  double step = this->_cfl_factor * min3(ut, vt, wt);

  double minh = min3(this->hx(), this->hy(), this->hz());
  //if (thermal_diffusion_coefficient() > 0)
  //  step = std::min(step, (minh * minh) / (6 * this->thermal_diffusion_coefficient()));
  if (viscosity_coefficient() > 0)
    step = std::min(step, (minh * minh) / (6 * this->viscosity_coefficient()));

  printf("Eqn_AdvectionDiffusion3DCo<T>::get_max_stable_timestep - return %f (%f %f %f)\n", step, ut, vt, wt);

  return step;
}

/*template<typename T>
void Eqn_AdvectionDiffusion3DCo<T>::add_thermal_force()
{
  // apply thermal force by adding -gkT to dvdt (let g = -1, k = 1, so this is just dvdt += T)
  //_advection_solver.deriv_vdt.linear_combination((T)1.0, _advection_solver.deriv_vdt, (T)1.0, _thermal_solver.phi);

  int tnx = this->nz();
  int tny = this->ny();
  int tnz = this->nx();

  int threadsInX = 16;
  int threadsInY = 2;
  int threadsInZ = 2;

  int blocksInX = (tnx+threadsInX-1)/threadsInX;
  int blocksInY = (tny+threadsInY-1)/threadsInY;
  int blocksInZ = (tnz+threadsInZ-1)/threadsInZ;

  dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ);
  dim3 Db = dim3(threadsInX, threadsInY, threadsInZ);

  T direction_mult = this->_vertical_direction & DIR_NEGATIVE_FLAG ? 1 : -1;
  T *uvw = (this->_vertical_direction & DIR_XAXIS_FLAG) ? &_deriv_udt.at(0,0,0) :
           (this->_vertical_direction & DIR_YAXIS_FLAG) ? &_deriv_vdt.at(0,0,0) : &_deriv_wdt.at(0,0,0);

  KernelWrapper wrapper;
  wrapper.PreKernel();
  //Eqn_AdvectionDiffusion3D_add_thermal_force<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(uvw, direction_mult * this->_gravity * this->_bouyancy, &_temp.at(0,0,0),
  //  _temp.xstride(), _temp.ystride(), _temp.stride(this->_vertical_direction), this->nx(), this->ny(), this->nz(),
  //  blocksInY, 1.0f / (float)blocksInY);
  if (!wrapper.PostKernel("Eqn_AdvectionDiffusion3D_add_thermal_force"))
    this->add_error();
}*/

template<typename T>
bool Eqn_AdvectionDiffusion3DCo<T>::advance_one_step(double dt)
{
  this->clear_error();
  this->num_steps++;

  // update dudt
  check_ok(_advection_solver.solve()); // updates dudt, dvdt, dwdt, overwrites whatever is there

  if (viscosity_coefficient() > 0) {
    check_ok(_u_diffusion.solve()); // dudt += \nu \nabla^2 u
    check_ok(_v_diffusion.solve()); // dvdt += \nu \nabla^2 v
    check_ok(_w_diffusion.solve()); // dwdt += \nu \nabla^2 w
  }

  // eventually this will be replaced with a grid-wide operation.
  //add_thermal_force();

  // update dTdt
  /*check_ok(_thermal_solver.solve()); // updates dTdt, overwrites whatever is there
  if (thermal_diffusion_coefficient() > 0) {
    check_ok(_thermal_diffusion.solve()); // dTdt += k \nabla^2 T
  }
  */

  T ab_coeff = -dt*dt / (2 * this->_lastdt);

  // advance T
  /*if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) {
    check_ok(_temp.linear_combination((T)1.0, _temp, (T)(dt - ab_coeff), _deriv_tempdt));
    check_ok(_temp.linear_combination((T)1.0, _temp, (T)ab_coeff, _last_deriv_tempdt));
  }
  else {
    check_ok(_temp.linear_combination((T)1.0, _temp, (T)dt, _deriv_tempdt));
  }
  */

  //do_halo_exchange_t();
  //check_ok(apply_3d_boundary_conditions_level1_nocorners(_temp, this->_local_thermalbc, this->_hx, this->_hy, this->_hz));

  // advance u,v,w
  if (this->_time_step == TS_ADAMS_BASHFORD2 && this->_lastdt > 0) {
    check_ok(_u.linear_combination((T)1.0, _u, (T)(dt - ab_coeff), _deriv_udt));
    check_ok(_u.linear_combination((T)1.0, _u, (T)ab_coeff, _last_deriv_udt));

    check_ok(_v.linear_combination((T)1.0, _v, (T)(dt - ab_coeff), _deriv_vdt));
    check_ok(_v.linear_combination((T)1.0, _v, (T)ab_coeff, _last_deriv_vdt));

    check_ok(_w.linear_combination((T)1.0, _w, (T)(dt - ab_coeff), _deriv_wdt));
    check_ok(_w.linear_combination((T)1.0, _w, (T)ab_coeff, _last_deriv_wdt));
  }
  else {
    check_ok(_u.linear_combination((T)1.0, _u, (T)dt, _deriv_udt));
    check_ok(_v.linear_combination((T)1.0, _v, (T)dt, _deriv_vdt));
    check_ok(_w.linear_combination((T)1.0, _w, (T)dt, _deriv_wdt));
  }

  // copy state for AB2
  if (this->_time_step == TS_ADAMS_BASHFORD2) {
    this->_lastdt = dt;
    //_last_deriv_tempdt.copy_all_data(_deriv_tempdt);
    _last_deriv_udt.copy_all_data(_deriv_udt);
    _last_deriv_vdt.copy_all_data(_deriv_vdt);
    _last_deriv_wdt.copy_all_data(_deriv_wdt);
  }

  // enforce incompressibility - this enforces bc's before and after projection
  //check_ok(_projection_solver.solve(this->_max_divergence));

  return !this->any_error();
}

template class Eqn_AdvectionDiffusion3DBase<float>;
template class Eqn_AdvectionDiffusion3D<float>;
template class Eqn_AdvectionDiffusion3DCo<float>;

#ifdef OCU_DOUBLESUPPORT
template class Eqn_AdvectionDiffusion3DBase<double>;
template class Eqn_AdvectionDiffusion3D<double>;
template class Eqn_AdvectionDiffusion3DCo<double>;
#endif

} // end namespace
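When TS_ADAMS_BASHFORD2 is active, advance_one_step above folds a variable-step Adams-Bashforth-2 update into two linear_combination calls per field, with ab_coeff = -dt*dt / (2 * lastdt). A minimal host-side sketch of the same arithmetic on flat arrays follows; the function and array names (ab2_step, u, dudt, last_dudt) are hypothetical and unrelated to the ocuequation grid classes.

// Illustrative second-order Adams-Bashforth step; names are hypothetical
// and this is not part of the solver above.
#include <vector>

void ab2_step(std::vector<float> &u,
              const std::vector<float> &dudt,       // derivative at the current step
              const std::vector<float> &last_dudt,  // derivative at the previous step
              float dt, float lastdt)
{
  // Same coefficient the solver computes: ab_coeff = -dt*dt / (2 * lastdt)
  float ab_coeff = -dt * dt / (2.0f * lastdt);
  for (size_t i = 0; i < u.size(); ++i) {
    // Equivalent to the two linear_combination calls:
    // u = 1*u + (dt - ab_coeff)*dudt, then u = 1*u + ab_coeff*last_dudt
    u[i] += (dt - ab_coeff) * dudt[i] + ab_coeff * last_dudt[i];
  }
}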
7e0e137437c68489c91914348658d56b4a4abf17.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Grid-stride kernel: writes sign(dy[i]) (-1, 0, or +1) into result[i]
// for every element with i >= idx and i % incy == 0.
extern "C" __global__ void sign_float(int n,int idx,float *dy,int incy,float *result)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if(i >= idx && i % incy == 0) {
            float x = dy[i];
            result[i] = (x > 0) - (x < 0);
        }
    }
}
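For orientation, a host-side driver for the hipified kernel above could look like the sketch below. Only the sign_float signature is taken from the file; the wrapper name, buffer names, and 256-thread launch configuration are assumptions, and the forward declaration presumes the kernel definition is compiled in the same translation unit.

// Hypothetical HIP host code exercising sign_float; illustrative only.
#include <hip/hip_runtime.h>

extern "C" __global__ void sign_float(int n, int idx, float *dy, int incy, float *result);

void run_sign_float(const float *host_in, float *host_out, int n)
{
    float *d_in = 0, *d_out = 0;
    hipMalloc((void**)&d_in,  n * sizeof(float));
    hipMalloc((void**)&d_out, n * sizeof(float));
    hipMemcpy(d_in, host_in, n * sizeof(float), hipMemcpyHostToDevice);

    // The kernel uses a grid-stride loop, so any grid size works;
    // 256 threads per block is just one common choice.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    hipLaunchKernelGGL(sign_float, dim3(blocks), dim3(threads), 0, 0,
                       n, /*idx=*/0, d_in, /*incy=*/1, d_out);

    hipMemcpy(host_out, d_out, n * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_in);
    hipFree(d_out);
}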
7e0e137437c68489c91914348658d56b4a4abf17.cu
#include "includes.h"

extern "C" __global__ void sign_float(int n,int idx,float *dy,int incy,float *result)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if(i >= idx && i % incy == 0) {
            float x = dy[i];
            result[i] = (x > 0) - (x < 0);
        }
    }
}
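The branchless expression (x > 0) - (x < 0) used by this kernel maps negative, zero, and positive inputs to -1, 0, and +1. A tiny host-only check of that idiom, illustrative and not part of the dataset files:

// Host-side sanity check of the sign idiom; purely illustrative.
#include <cstdio>

static float sign_of(float x) { return (x > 0) - (x < 0); } // -1, 0, or +1

int main()
{
    const float samples[] = { -3.5f, 0.0f, 2.25f };
    for (float x : samples)
        std::printf("sign(%g) = %g\n", x, sign_of(x)); // prints -1, 0, 1
    return 0;
}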
59c63f85417d41c66f405ce682b46e92425bc201.hip
// !!! This is a file automatically generated by hipify!!! #include "ImgProc.hpp" #include "GpuProc.cu" #include <stdio.h> #include <opencv2\\core\core.hpp> #include <opencv2\highgui\highgui.hpp> #include <iostream> #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <ctime> #include <chrono> using namespace cv; using namespace std; #define FILTER_SIZE 3 #define M_PI 3.14159265358979323846 /* pi */ #define IMG_CHUNK 3110400 /* (1920 x 1080 x 3) / 2 */ #define THREADS_PER_BLOCK 256 //Boat from https://homepages.cae.wisc.edu/~ece533/images/boat.png; //-- Cuda Device Code /*__global__ void rgb2GrayKernel(uchar* dest_data, uchar* src_data, int rows, int cols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; src_data += 3 * offset; if (idx < rows * cols){ dest_data[idx] = 0.299 * src_data[3 * idx + 2] + 0.587 * src_data[3 * idx + 1] + 0.114 * src_data[3 * idx]; } }*/ /*__global__ void reverseKernel(uchar* dest_data, uchar* src_data, int srcN, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx + (offset * 3) < srcN){ dest_data[idx] = 255 - src_data[idx + (offset * 3)]; } }*/ /*__global__ void gammaCorrectionKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, double gamma, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double gammaCorrect = 1.00 / gamma; if (idx + offset < 3 * srcRows * srcCols){ double color = (double)src_data[idx + offset]; uchar val = 255 * pow((color / 255), gammaCorrect); dest_data[idx] = val; } }*/ /*__global__ void directResizeKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, int destRows, int destCols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double rRow = (double)srcRows / destRows; double rCol = (double)srcCols / destCols; if (idx + offset < 3 * destRows * destCols){ int sRow = (((idx + offset) / 3) / destCols) * rRow; int sCol = (((idx + offset) / 3) % destCols) * rCol; dest_data[idx] = src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; } }*/ /*__global__ void linearResizeKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, int destRows, int destCols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double rRow = (double)srcRows / destRows; double rCol = (double)srcCols / destCols; if (idx < 3 * destRows * destCols){ double dsRow = (((offset + idx) / 3) / destCols) * rRow; double dsCol = (((offset + idx) / 3) % destCols) * rCol; int sRow = (int)dsRow; int sCol = (int)dsCol; double deltaI = dsRow - sRow; double deltaJ = dsCol - sCol; if (deltaI + deltaJ < 0.0000000001){ dest_data[idx] = src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; } else{ uchar val = 0; double area1 = (1 - deltaI) * (1 - deltaJ); double area2 = deltaI * (1 - deltaJ); double area3 = deltaI * deltaJ; double area4 = (1 - deltaI) * deltaJ; val += area1 * src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; val += area2 * src_data[3 * ((sRow + 1) * srcCols + sCol) + (idx + offset) % 3]; val += area3 * src_data[3 * ((sRow + 1) * srcCols + sCol + 1) + (idx + offset) % 3]; val += area4 * src_data[3 * (sRow * srcCols + sCol + 1) + (idx + offset) % 3]; dest_data[idx] = val; } } }*/ /*__global__ void gaussianFilterKernel(uchar* dest_data, uchar* src_data, double* gKernel, int filter_size, int rows, int cols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = ((idx + offset) / 3) / cols; int j = ((idx + offset) / 3) 
% cols; int maxk = min(filter_size, rows - i); int mink = min(filter_size, i); int maxl = min(filter_size, cols - j); int minl = min(filter_size, j); uchar tmp = 0; for (int k = -mink; k <= maxk; k++){ for (int l = -minl; l <= maxl; l++){ tmp += *(gKernel + (k + filter_size) * (2 * filter_size + 1) + (l + filter_size)) * src_data[(idx + offset) + 3 * (k * cols + l)]; } } dest_data[idx] = tmp; //} }*/ /*__global__ void sobelGradientKernel(short* temp_data, uchar* src_data, int* sobel_x, int* sobel_y, double* rangeMin, double* rangeMax, int srcRows, int srcCols, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = ((idx + offset) / 3) / srcCols; int j = ((idx + offset) / 3) % srcCols; float tmpx = 0; float tmpy = 0; int maxk = min(1, srcRows - i); int mink = min(1, i); int maxl = min(1, srcCols - j); int minl = min(1, j); for (int k = -mink; k <= maxk; k++){ for (int l = -minl; l <= maxl; l++){ //if (k > 1 || k < -1 || l > 1 || l < -1) //printf("Error in Sobel!\n"); tmpx = tmpx + sobel_x[3 * (k + 1) + (l + 1)] * src_data[(idx + offset) + 3 * (k * srcCols + l)]; tmpy = tmpy + sobel_y[3 * (k + 1) + (l + 1)] * src_data[(idx + offset) + 3 * (k * srcCols + l)]; } } int value = sqrt((tmpx*tmpx) + (tmpy*tmpy)); //printf("Value: %d\n",value); temp_data[idx] = value; }*/ /*__global__ void sobelRangeKernel(uchar* dest_data, short* temp_data, double rangeMin, double rangeMax, double thresh_min, double thresh_max, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; dest_data[idx] = 255; int value = 255 * ((temp_data[idx] + rangeMin) / (rangeMax - rangeMin)); //int value = 255 * ((temp_data[idx + offset] + rangeMin) / (rangeMax - rangeMin)); if (value >= thresh_max){ value = 255; } else if (value < thresh_min){ value = 0; } dest_data[idx] = value; }*/ //__global__ void kMeansCountingKernel(uchar* src_data, uchar* k_index, int* k_count, int* hits, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols, int iter){ /*__global__ void kMeansCountingKernel(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; if (i >= srcRows) return; float b2 = src_data[3 * (i * srcCols + j)]; float g2 = src_data[3 * (i * srcCols + j) + 1]; float r2 = src_data[3 * (i * srcCols + j) + 2]; float min_dist = FLT_MAX; uchar new_index = k_index[i * srcCols + j]; for (int group = 0; group < k_means; group++){ float val = 0; val += (b2 - k_colors[3 * group]) * (b2 - k_colors[3 * group]); val += (g2 - k_colors[3 * group + 1]) * (g2 - k_colors[3 * group + 1]); val += (r2 - k_colors[3 * group + 2]) * (r2 - k_colors[3 * group + 2]); float dist = sqrtf(val); if (dist < min_dist){ min_dist = dist; new_index = group; } } if (k_index[i * srcCols + j] != new_index){ k_index[i * srcCols + j] = new_index; //printf("New Index: %d", new_index); //atomicAdd((hits + new_index), 1); //if (iter > 60) // printf(" (%d, %d) \n",i,j); convergence[0] = false; } atomicAdd((k_count + new_index), 1); }*/ /*__global__ void kMeansCountingKernelOld(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; float b2 = src_data[3 * (i * srcCols + j)]; float g2 = src_data[3 * (i * srcCols + j) + 1]; float r2 = src_data[3 * (i * srcCols + j) + 2]; float min_dist = FLT_MAX; uchar new_index; for 
(int group = 0; group < k_means; group++){ float b1 = k_colors[3 * group]; float g1 = k_colors[3 * group + 1]; float r1 = k_colors[3 * group + 2]; float dist = std::sqrt(pow(r2 - r1, 2) + pow(g2 - g1, 2) + pow(b2 - b1, 2)); //Combination of pow and sqrt is too much //float dist = 0; if (dist < min_dist){ min_dist = dist; //k_index[i * srcCols + j] = group; new_index = group; } } if (k_index[i * srcCols + j] != new_index){ k_index[i * srcCols + j] = new_index; //printf("New Index: %d", new_index); *convergence = false; } //k_count[new_index] += 1; atomicAdd((k_count + new_index), 1); }*/ /*__global__ void kMeansGroupAdjustKernel(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; int group = k_index[i * srcCols + j]; for (int color = 0; color < 3; color++){ float src_val = src_data[3 * (i * srcCols + j) + color]; int group_count = k_count[group]; float val = src_val / group_count; //k_colors[3 * group + color] += val; atomicAdd((k_colors + (3 * group + color)),val); } }*/ /*__global__ void kMeansOutputKernel(uchar* dest_data, uchar* k_index, float* k_colors, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; int group = k_index[i * srcCols + j]; for (int color = 0; color < 3; color++){ dest_data[3 * (i * srcCols + j) + color] = (uchar)k_colors[3 * group + color]; } }*/ //-- Cuda Host Code /*void cudaRgb2Gray(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); //int destN = min(1920 * 1080, srcRows * srcCols); for (int step = 0; step < rounds; step++){ int destN = min(chunkRows * srcCols, srcRows * srcCols - offset); if (destN <= 0) break; blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); rgb2GrayKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, chunkRows, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN; } hipFree(deviceSrcData); }*/ /*void cudaReverse(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); //printf("Rounds: %d \n", rounds); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - (offset * 3)); if (destN <= 0){ //printf("Broken!\n"); break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); reverseKernel <<<blocks, threadsPerBlock >>>(deviceDestData, deviceSrcData, srcN, chunkRows, offset); hipMemcpy(output + (3 * offset), 
deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN / 3; } hipFree(deviceSrcData); }*/ /*void cudaGammaCorrection(uchar* input, uchar* output, double gamma, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); gammaCorrectionKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, gamma, chunkRows, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN; } hipFree(deviceSrcData); }*/ /*void cudaDirectResize(uchar* input, uchar* output, int srcRows, int srcCols, int destRows, int destCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / destCols; if (chunkRows == 0){ chunkRows = destRows; } int rounds = ceil(destRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * destCols, 3 * destRows * destCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); directResizeKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, destRows, destCols, chunkRows, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN; } hipFree(deviceSrcData); }*/ /*void cudaLinearResize(uchar* input, uchar* output, int srcRows, int srcCols, int destRows, int destCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / destCols; if (chunkRows == 0){ chunkRows = destRows; } int rounds = ceil(destRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * destCols, 3 * destRows * destCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); linearResizeKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, destRows, destCols, chunkRows, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN; } hipFree(deviceSrcData); }*/ /*void cudaGaussianFilter(uchar* input, uchar* output, double gKernel[][2 * FILTER_SIZE + 1], int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; double* deviceFilter; 
int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMalloc(&deviceFilter, (2 * FILTER_SIZE + 1) * (2 * FILTER_SIZE + 1) * sizeof(double)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); hipMemcpy(deviceFilter, gKernel, (2 * FILTER_SIZE + 1)*(2 * FILTER_SIZE + 1)*sizeof(double), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); gaussianFilterKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, deviceFilter, FILTER_SIZE, srcRows, srcCols, chunkRows, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); offset += destN; } hipFree(deviceSrcData); hipFree(deviceFilter); }*/ /*void cudaSobelFilter(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; short* deviceTempData; int* deviceSobel_x; int* deviceSobel_y; int srcN = 3 * srcRows * srcCols; double* deviceRangeMin; double* deviceRangeMax; double rangeMin[1] = { 0.0 }; double rangeMax[1] = { 0.0 }; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int sobel_x[9], sobel_y[9]; sobel_x[0] = -1; sobel_x[1] = 0; sobel_x[2] = 1; sobel_x[3] = -2; sobel_x[4] = 0; sobel_x[5] = 2; sobel_x[6] = -1; sobel_x[7] = 0; sobel_x[8] = 1; sobel_y[0] = -1; sobel_y[1] = -2; sobel_y[2] = -1; sobel_y[3] = 0; sobel_y[4] = 0; sobel_y[5] = 0; sobel_y[6] = 1; sobel_y[7] = 2; sobel_y[8] = 1; //int threadsPerBlock = 512; //blocks = (srcN + threadsPerBlock - 1) / threadsPerBlock; //printf("Blocks: %d\n", blocks); hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMalloc(&deviceSobel_x, 9 * sizeof(int)); hipMalloc(&deviceSobel_y, 9 * sizeof(int)); hipMalloc(&deviceRangeMin, sizeof(double)); hipMalloc(&deviceRangeMax, sizeof(double)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); hipMemcpy(deviceSobel_x, sobel_x, 9 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceSobel_y, sobel_y, 9 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceRangeMin, rangeMin, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceRangeMax, rangeMax, sizeof(double), hipMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); short* temp_data = new short[3 * srcRows * srcCols]; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceTempData, destN*sizeof(short)); sobelGradientKernel << <blocks, threadsPerBlock >> > (deviceTempData, deviceSrcData, deviceSobel_x, deviceSobel_y, deviceRangeMin, deviceRangeMax, srcRows, srcCols, offset); hipMemcpy(temp_data + offset, deviceTempData, destN*sizeof(short), hipMemcpyDeviceToHost); hipFree(deviceTempData); offset += destN; } hipFree(deviceSrcData); hipFree(deviceSobel_x); hipFree(deviceSobel_y); hipFree(deviceRangeMin); hipFree(deviceRangeMax); //printf("Works!\n"); //hipMemcpy(rangeMin, deviceRangeMin, 
sizeof(double), hipMemcpyDeviceToHost); //hipMemcpy(rangeMax, deviceRangeMax, sizeof(double), hipMemcpyDeviceToHost); //printf("Host temp data done"); for (int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ double value = temp_data[3 * (i * srcCols + j) + color];; rangeMin[0] = ::min(value, rangeMin[0]); rangeMax[0] = ::max(value, rangeMax[0]); } } } //printf("Got here!\n"); //output = (uchar*)temp_data; //return; //printf("Range Min: %f, Range Max: %f \n", rangeMin[0], rangeMax[0]); //blocks = (srcN + threadsPerBlock - 1) / threadsPerBlock; offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceTempData, destN*sizeof(short)); hipMemcpy(deviceTempData, temp_data + offset, destN*sizeof(short), hipMemcpyHostToDevice); hipMalloc(&deviceDestData, destN*sizeof(uchar)); sobelRangeKernel << <blocks, threadsPerBlock >> >(deviceDestData, deviceTempData, rangeMin[0], rangeMax[0], 20, 60, offset); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), hipMemcpyDeviceToHost); hipFree(deviceDestData); hipFree(deviceTempData); offset += destN; } //uchar minThresh = 20; //uchar maxThresh = 60; /*for (int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j) + color; int value = 255 * ((temp_data[idx] + rangeMin[0])/(rangeMax[0] - rangeMin[0])); if (value >= maxThresh){ value = 255; } else if (value < minThresh){ value = 0; } output[idx] = value; } } } //hipFree(deviceTempData); //hipFree(deviceDestData); delete[] temp_data; }*/ /*void cudaKMeans(uchar* input, uchar* output, int srcRows, int srcCols, int k_means){ int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; uchar* deviceSrcData; uchar* deviceDestData; float* device_k_colors; int* device_k_count; //int* device_hits; uchar* device_k_index; bool* device_convergence; float* k_colors = new float[k_means * 3]; uchar* k_index = new uchar[srcRows * srcCols]; int* k_count = new int[k_means]; //* hits = new int[k_means]; int srcN = srcRows * srcCols * 3; for (int pix = 0; pix < k_means; pix++){ int i = rand() % srcRows; int j = rand() % srcCols; for (int color = 0; color < 3; color++){ k_colors[3 * pix + color] = input[3 * (i * srcCols + j) + color]; } } hipMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); hipMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), hipMemcpyHostToDevice); //printf("=== START ===\n"); //for (int group = 0; group < k_means; group++){ //printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); //} bool convergence[1] = { false }; for (int k = 0; k < srcRows * srcCols; k++){ k_index[k] = 0; } chunkRows = (IMG_CHUNK * 0.5) / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } hipMalloc(&device_k_count, (k_means)*sizeof(int)); //hipMalloc(&device_hits, (k_means)*sizeof(int)); //hipMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); hipMalloc(&device_convergence, sizeof(bool)); //hipMalloc(&device_k_index, (srcRows * srcCols)*sizeof(uchar)); //hipMemcpy(device_k_index, k_index, srcRows * srcCols *sizeof(uchar), hipMemcpyHostToDevice); int count = 0; while (!convergence[0]){ convergence[0] = true; hipMemcpy(device_convergence, convergence, sizeof(bool), hipMemcpyHostToDevice); for (int k = 0; k < 
k_means; k++){ k_count[k] = 0; //hits[k] = 0; } hipMemcpy(device_k_count, k_count, k_means * sizeof(int), hipMemcpyHostToDevice); //hipMemcpy(device_hits, hits, k_means * sizeof(int), hipMemcpyHostToDevice); //printf("Count: %d\n",count); int rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN/3) + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceSrcData, destN*sizeof(uchar)); hipMemcpy(deviceSrcData, input + offset, destN*sizeof(uchar), hipMemcpyHostToDevice); hipMalloc(&device_k_index, destN*sizeof(uchar)/3); hipMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, hipMemcpyHostToDevice); //kernel //kMeansCountingKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_hits, device_k_colors, device_convergence, k_means, srcRows, srcCols,count); kMeansCountingKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_k_colors, device_convergence, k_means, srcRows, srcCols); hipMemcpy(k_index + (offset / 3), device_k_index, destN*sizeof(uchar) / 3, hipMemcpyDeviceToHost); hipFree(deviceSrcData); hipFree(device_k_index); offset += destN; } hipMemcpy(k_count, device_k_count, (k_means)*sizeof(int), hipMemcpyDeviceToHost); //hipMemcpy(hits, device_hits, (k_means)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(convergence, device_convergence, sizeof(bool), hipMemcpyDeviceToHost); //printf("Group Count, step %d::\n",count); //for (int i = 0; i < k_means; i++){ // printf("Group %d: %d\n",i,k_count[i]); //} //convergence[0] = true; //Stopper //if (convergence[0]) //RE-ENABLE WHEN ACTUALLY USING //break; if (count == 200){ //printf("Stopped at %d!\n",count); break; } count++; for (int k = 0; k < 3 * k_means; k++){ k_colors[k] = 0; } hipMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), hipMemcpyHostToDevice); rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN / 3) + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceSrcData, destN*sizeof(uchar)); hipMemcpy(deviceSrcData, input + offset, destN*sizeof(uchar), hipMemcpyHostToDevice); hipMalloc(&device_k_index, destN*sizeof(uchar) / 3); hipMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, hipMemcpyHostToDevice); kMeansGroupAdjustKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_k_colors, k_means, srcRows, srcCols); hipFree(deviceSrcData); hipFree(device_k_index); offset += destN; } hipMemcpy(k_colors, device_k_colors, 3 * k_means * sizeof(float), hipMemcpyDeviceToHost); //kernel } int rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN / 3) + threadsPerBlock - 1) / threadsPerBlock; hipMalloc(&deviceDestData, destN*sizeof(uchar)); hipMalloc(&device_k_index, destN*sizeof(uchar) / 3); hipMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, hipMemcpyHostToDevice); //kernel kMeansOutputKernel << <blocks, threadsPerBlock >> > (deviceDestData, device_k_index, device_k_colors, srcRows, srcCols); hipMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), 
hipMemcpyDeviceToHost); hipFree(deviceDestData); hipFree(device_k_index); offset += destN; } //printf("Count: %d\n", count); hipFree(device_k_colors); hipFree(device_k_count); hipFree(device_convergence); delete[] k_colors; delete[] k_index; delete[] k_count; }*/ /*void cudaKMeansOld(uchar* input, uchar* output, int srcRows, int srcCols, int k_means){ int threadsPerBlock = 512; int blocks = ((srcRows * srcCols) + threadsPerBlock - 1) / threadsPerBlock; uchar* deviceSrcData; uchar* deviceDestData; float* device_k_colors; int* device_k_count; uchar* device_k_index; bool* device_convergence; //int srcN = 3 * srcRows * srcCols; int srcN = min(3 * 1920 * 1080, 3 * srcRows * srcCols); hipMalloc(&deviceSrcData, srcN*sizeof(uchar)); hipMalloc(&deviceDestData, srcN*sizeof(uchar)); hipMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); hipMalloc(&device_k_index, (srcRows * srcCols)*sizeof(uchar)); hipMalloc(&device_k_count, (k_means)*sizeof(int)); hipMalloc(&device_convergence, sizeof(bool)); hipMemcpy(deviceSrcData, input, srcN*sizeof(uchar), hipMemcpyHostToDevice); float* k_colors = new float[3 * k_means]; uchar* k_index = new uchar[srcRows * srcCols]; int* k_count = new int[k_means]; for (int pix = 0; pix < k_means; pix++){ int i = rand() % srcRows; int j = rand() % srcCols; for (int color = 0; color < 3; color++){ k_colors[3 * pix + color] = input[3 * (i * srcCols + j) + color]; } } hipMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), hipMemcpyHostToDevice); printf("=== START ===\n"); for (int group = 0; group < k_means; group++){ printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); } bool convergence[1] = { false }; for (int k = 0; k < srcRows * srcCols; k++){ k_index[k] = 0; } hipMemcpy(device_k_index, k_index, srcRows * srcCols *sizeof(uchar), hipMemcpyHostToDevice); int count = 0; while (!convergence[0]){ convergence[0] = true; hipMemcpy(device_convergence, convergence, sizeof(bool), hipMemcpyHostToDevice); for (int k = 0; k < k_means; k++){ k_count[k] = 0; } hipMemcpy(device_k_count, k_count, k_means * sizeof(int), hipMemcpyHostToDevice); kMeansCountingKernelOld<<<blocks,threadsPerBlock>>> (deviceSrcData,device_k_index,device_k_count,device_k_colors, device_convergence, k_means,srcRows,srcCols); hipMemcpy(k_index, device_k_index, (srcRows*srcCols)*sizeof(uchar), hipMemcpyDeviceToHost); hipMemcpy(k_count, device_k_count, (k_means)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(convergence, device_convergence, sizeof(bool), hipMemcpyDeviceToHost); if (count == 400){ printf("Stopped at 400!\n"); break; } count++; //printf("Bogey::\n"); if (convergence[0]) break; for (int k = 0; k < 3 * k_means; k++){ k_colors[k] = 0; } hipMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), hipMemcpyHostToDevice); //kMeansGroupAdjustKernel<<<blocks,threadsPerBlock>>> (deviceSrcData,device_k_index,device_k_count,device_k_colors,k_means,srcRows,srcCols); hipMemcpy(k_colors, device_k_colors, 3 * k_means * sizeof(float), hipMemcpyDeviceToHost); } hipMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), hipMemcpyHostToDevice); printf("=== END ===\n"); //for (int group = 0; group < k_means; group++){ //printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); //} kMeansOutputKernel<<<blocks,threadsPerBlock>>> (deviceDestData,device_k_index,device_k_colors,srcRows,srcCols); hipMemcpy(output, deviceDestData, srcN*sizeof(uchar), 
hipMemcpyDeviceToHost); hipFree(deviceSrcData); hipFree(deviceDestData); hipFree(device_k_colors); hipFree(device_k_index); hipFree(device_k_count); hipFree(device_convergence); delete[] k_colors; delete[] k_index; delete[] k_count; }*/ //-- OpenCV Handling /*Mat rgb2Gray(Mat image){ Mat out = Mat(image.rows, image.cols, CV_8UC1); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaRgb2Gray(input, output, image.rows, image.cols); return out; }*/ /*Mat reverse(Mat image){ Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaReverse(input, output, image.rows, image.cols); return out; }*/ /*Mat gammaCorrection(Mat image, double gamma){ Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaGammaCorrection(input, output, gamma, image.rows, image.cols); return out; }*/ /*Mat directResize(Mat image, int rows, int cols){ Mat out = Mat(rows, cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaDirectResize(input, output, image.rows, image.cols, rows, cols); return out; }*/ /*Mat linearResize(Mat image, int rows, int cols){ Mat out = Mat(rows, cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaLinearResize(input, output, image.rows, image.cols, rows, cols); return out; }*/ /*void createFilter(double gKernel[][2 * FILTER_SIZE + 1], double inputSigma){ //standard deviation to 1.0 double sigma = inputSigma; double r, s = 2.0 * sigma * sigma; double sum = 0.0; for (int x = -FILTER_SIZE; x <= FILTER_SIZE; x++){ for (int y = -FILTER_SIZE; y <= FILTER_SIZE; y++){ r = sqrt(x*x + y*y); gKernel[x + FILTER_SIZE][y + FILTER_SIZE] = exp(-(r*r) / s) / (M_PI * s); sum += gKernel[x + FILTER_SIZE][y + FILTER_SIZE]; } } for (int i = 0; i < 2 * FILTER_SIZE + 1; ++i){ for (int j = 0; j < 2 * FILTER_SIZE + 1; ++j){ gKernel[i][j] /= sum; } } }*/ /*Mat gaussianFilter(Mat image, double sigma){ double gKernel[2 * FILTER_SIZE + 1][2 * FILTER_SIZE + 1]; createFilter(gKernel, sigma); Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaGaussianFilter(input,output,gKernel,image.rows,image.cols); //delete[] gKernel; return out; //return image; }*/ /*Mat sobelFilter(Mat image){ Mat out(image.rows, image.cols, image.type()); //Mat temp(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaSobelFilter(input,output,image.rows,image.cols); return out; }*/ /*Mat kMeans(Mat image, int k_means){ srand(6000); if (k_means > 256){ printf("Error: Max number of groups exceeded (256)\n"); exit(-1); } Mat out(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaKMeans(input,output,image.rows,image.cols,k_means); return out; }*/ /*Mat gaussianPyramid(cv::Mat image, uchar levels, float scale){ if (scale > 0.5){ printf("Error: Scale > 0.5\n"); exit(-1); } int srcRows = image.rows; int srcCols = image.cols; Mat output(srcRows + (srcRows * scale + 1), srcCols, image.type()); uchar* src_data = (uchar*)image.datastart; uchar* dest_data = (uchar*)output.datastart; for (int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j) + color; dest_data[idx] = 
src_data[idx]; } } } dest_data += srcRows * srcCols * 3; int newRows = srcRows * scale; int newCols = srcCols * scale; float newScale = scale; int offset = 0; for (int level = 1; level < levels; level++){ image = gaussianFilter(image, 1.0); image = linearResize(image, newRows, newCols); src_data = (uchar*)image.datastart; for (int i = 0; i < newRows; i++){ for (int j = 0; j < newCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j + offset) + color; dest_data[idx] = src_data[3 * (i * newCols + j) + color]; } } } offset += newCols; newRows *= scale; newCols *= scale; newScale *= scale; } return output; }*/ //-- Main void simulate(cv::Mat& im){ //im = frgb2Gray(im); img_proc::mySift(im); mySift(im); } int main(void){ // [MAIN] //printf("%d\n", (unsigned int)powf(2, 2) & 4); //img_proc::makeFilter(1.7148); //printf("Done!\n"); //getchar(); //return; cv::Mat im, im0, im1; im = imread("D://School//Summer 2016//Research//nausicaa.jpg"); im = img_proc::linearResize(im, 640, 640); //im = img_proc::frgb2Gray(im); //im = imread("D://School//Summer 2016//Research//Stereo//storage//z_im0.png"); //im0 = imread("D://School//Summer 2016//Research//kMeans//nausicaa_float_cpu.png"); //im0 = img_proc::frgb2Gray(im0); //im1 = imread("D://School//Summer 2016//Research//kMeans//nausicaa_float_gpu.png"); //im1 = img_proc::frgb2Gray(im1); //im = imread("D://School//Summer 2016//Research//bike.jpg"); //im = imread("D://School//Summer 2016//Research//gray//einstein_gray.png"); //im0 = imread("D://School//Summer 2016//Research//Stereo//storage//ims0.png"); //im1 = imread("D://School//Summer 2016//Research//Stereo//storage//ims1.png"); if (im.empty()){ printf("Error!\n"); getchar(); return -1; } vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); int versionFish = 0; //img_proc::diff_count(im0, im1); //im = img_proc::frgb2Gray(im); //im = img_proc::fGaussianFilterSep(im, 1.6); //im = img_proc::mySift(im); //im = fGaussianFilterSep(im, 1.6); //im = reverse(im); //im = img_proc::mySift(im); //im = myConv2(im, filter, 0); //im1 = kMeansFixed(im, 4); //im = img_proc::linearResize(im, im.rows / 4, im.cols / 4); //im0 = imread("D://School//Summer 2016//Research//mySift//audrey_conv_cpu.png"); //im1 = imread("D://School//Summer 2016//Research//mySift//audrey_conv_gpu.png"); //im0 = img_proc::frgb2Gray(im0); //im1 = img_proc::frgb2Gray(im1); //im = img_proc::diff_count(im0, im1); //imwrite("D://School//Summer 2016//Research//mySift//reverse.png", im, compression_params); //imwrite("D://School//Summer 2016//Research//Stereo//storage//z_im1.png", im, compression_params); //im0 = img_proc::frgb2Gray(im0); //im1 = img_proc::frgb2Gray(im1); //img_proc::diff_count(im0, im1); //im = img_proc::depthFromStereo(im0, im1, 0, 0, 0); //im = img_proc::kMeans(im, 8); //im = kMeans(im, 8); //im = img_proc::kMeansFixed(im, 8); //imwrite("D://School//Summer 2016//Research//kMeans//nausicaa_float_gpu.png", im, compression_params); //hipRuntimeGetVersion(&versionFish); //printf("Version: %f\n", CV_VERSION); //cout << CV_VERSION << endl; //Vec3b intensity = im.at<Vec3b>(20, 20); //printf("rows: %d, columns: %d, %d \n", im.rows, im.cols,intensity.val[2]); //get_nvtAttrib("Bike", 0xFF222222); //img_proc::mySift(im); //mySift(im); //roctxRangePop(); //Mat resized = gaussianFilter(im,2.0); //Mat resized = img_proc::kMeans(im,4); //Mat temp = frgb2Gray(im); //Mat resized2 = fdirectResize(temp, temp.rows / 4, temp.cols / 4); //Mat resized = 
img_proc::fdirectResize(temp, temp.rows / 4, temp.cols / 4); //auto time1 = std::chrono::high_resolution_clock::now(); //Mat resized = img_proc::mySift(im); //nvtxEventAttributes_t eventAttrib = get_nvtAttrib("Fish", 0xFF880000); //nvtxMarkEx(&eventAttrib); //eventAttrib.message.ascii = "Fish"; //eventAttrib.color = 0xFF880000; //nvtxRangePushEx(&eventAttrib); //Mat resized, resized2; //cudaLagSetup(); //im = imread("D://School//Summer 2016//Research//audrey.jpg"); //get_nvtAttrib("Audrey", 0xFF222222); //simulate(im); //roctxRangePop(); /*im = imread("D://School//Summer 2016//Research//einstein.png"); get_nvtAttrib("Einstein", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//boat.png"); get_nvtAttrib("Boat", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//bike.jpg"); get_nvtAttrib("Bike", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//castle.png"); get_nvtAttrib("Castle", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//lena.png"); get_nvtAttrib("Lena", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//valve.png"); get_nvtAttrib("Valve", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//koala.png"); get_nvtAttrib("Koala", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//audrey.jpg"); get_nvtAttrib("Audrey", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop(); im = imread("D://School//Summer 2016//Research//nausicaa.jpg"); get_nvtAttrib("Nausicaa", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); roctxRangePop();*/ //********// //printf("Done!\n"); //getchar(); //return 0; //********// ofstream avg_file, det_file, allc_file, allg_file, spd_file, thc_file, thg_file; uchar micro = 181; string name = ""; double filesize = 0; allc_file.open("D://School//Summer 2016//Research//Textfiles//averages__all_cpu.csv"); allc_file << micro << "s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; allg_file.open("D://School//Summer 2016//Research//Textfiles//averages__all_gpu.csv"); allg_file << micro << "s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; spd_file.open("D://School//Summer 2016//Research//Textfiles//averages__speedups.csv"); spd_file << "CPU/GPU, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge 
detection,k-means,gaussian pyramid" << endl; thc_file.open("D://School//Summer 2016//Research//Textfiles//averages__throughput_cpu.csv"); thc_file << "MB/s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; thg_file.open("D://School//Summer 2016//Research//Textfiles//averages__throughput_gpu.csv"); thg_file << "MB/s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; //printf("Reading started... "); for (int pics = 0; pics <= 8; pics++){ Mat im1; if (pics == 0){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_einstein.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_einstein.txt"); im1 = imread("D://School//Summer 2016//Research//einstein.png"); name = "einstein"; } else if (pics == 1){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_castle.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_castle.txt"); im1 = imread("D://School//Summer 2016//Research//castle.png"); name = "castle"; } else if (pics == 2){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_lena.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_lena.txt"); im1 = imread("D://School//Summer 2016//Research//lena.png"); name = "lena"; } else if (pics == 3){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_boat.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_boat.txt"); im1 = imread("D://School//Summer 2016//Research//boat.png"); name = "boat"; } else if (pics == 4){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_bike.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_bike.txt"); im1 = imread("D://School//Summer 2016//Research//bike.jpg"); name = "bike"; } else if (pics == 5){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_valve.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_valve.txt"); im1 = imread("D://School//Summer 2016//Research//valve.png"); name = "valve"; } else if (pics == 6){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_koala.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_koala.txt"); im1 = imread("D://School//Summer 2016//Research//koala.png"); name = "koala";//[STOP HERE] } else if (pics == 7){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_nausicaa.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_nausicaa.txt"); im1 = imread("D://School//Summer 2016//Research//nausicaa.jpg"); name = "nausicaa"; } else if (pics == 8){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_audrey.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_audrey.txt"); im1 = imread("D://School//Summer 2016//Research//audrey.jpg"); name = "oranges"; } else if (pics == 9){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_oranges.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_oranges.txt"); im1 = imread("D://School//Summer 2016//Research//oranges.jpg"); name = "oranges"; } else if (pics == 10){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_mountains.txt"); det_file.open("D://School//Summer 
2016//Research//Textfiles//details_mountains.txt"); im1 = imread("D://School//Summer 2016//Research//mountains.jpg"); name = "mountains"; } else if (pics == 11){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_tiger.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_tiger.txt"); im1 = imread("D://School//Summer 2016//Research//tiger.jpg"); name = "tiger"; } else{ printf("Error!\n"); exit(-1); } filesize = im1.rows * im1.cols * 3; det_file << name << ": " << im1.cols << "x" << im1.rows << endl << "========" << endl; avg_file << name << ": " << im1.cols << "x" << im1.rows << endl << "========" << endl; allc_file << name; allg_file << name; spd_file << name; thc_file << name; thg_file << name; //printf("Reading done\n"); //RGB 2 Gray int rounds = 10; printf("RGB2Gray\n"); double cpu_duration = 0, gpu_duration = 0; det_file << "RGB 2 GRAY" << endl; avg_file << "RGB 2 GRAY" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::rgb2Gray(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); rgb2Gray(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //avg_file.close(); //det_file.close(); //continue; //Reverse printf("Reverse\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "REVERSE" << endl; avg_file << "REVERSE" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::reverse(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); reverse(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << 
fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gamma Correction printf("Gamma Correction\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "GAMMA CORRECTION" << endl; avg_file << "GAMMA CORRECTION" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::gammaCorrection(im1, 2.0); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); gammaCorrection(im1, 2.0); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Direct Resize printf("Direct Resize x2\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "DIRECT RESIZE (X2)" << endl; avg_file << "DIRECT RESIZE (X2)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::directResize(im1, im1.rows * 2, im1.cols * 2); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); directResize(im1, im1.rows * 2, im1.cols * 2); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); printf("Direct Resize x0.5\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "DIRECT RESIZE (X0.5)" << endl; avg_file << 
"DIRECT RESIZE (X0.5)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::directResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); directResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Linear Resize printf("Linear Resize x2\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "LINEAR RESIZE (X2)" << endl; avg_file << "LINEAR RESIZE (X2)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::linearResize(im1, im1.rows * 2, im1.cols * 2); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); linearResize(im1, im1.rows * 2, im1.cols * 2); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); printf("Linear Resize x0.5\n"); det_file << "LINEAR RESIZE (X0.5)" << endl; avg_file << "LINEAR RESIZE (X0.5)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::linearResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << 
"CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); linearResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gaussian Filter rounds = 5; cpu_duration = 0; gpu_duration = 0; printf("Gaussian Filter Sep\n"); Mat img_temp = frgb2Gray(im1); //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "GAUSSIAN FILTER SEP (SIZE 3, SIGMA 1.0)" << endl; avg_file << "GAUSSIAN FILTER SEP (SIZE 3, SIGMA 1.0)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); //img_proc::gaussianFilter(im1, 1.0); img_proc::fGaussianFilterSep(img_temp, 1.0); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); fGaussianFilterSep(img_temp, 1.0); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Edge Filter rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); printf("Edge Filter\n"); det_file << "EDGE DETECTION" << endl; avg_file << "EDGE DETECTION" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::sobelFilter(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); sobelFilter(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); 
det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //k-Means 8, 200 rounds rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); det_file << "K MEANS 8" << endl; avg_file << "K MEANS 8" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::kMeansFixed(im1, 8); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); kMeansFixed(im1, 8); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gaussian Pyramid rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); det_file << "GAUSSIAN PYRAMID 8 0.5x" << endl; avg_file << "GAUSSIAN PYRAMID 8 0.5x" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::gaussianPyramid(im1, 8, 0.5); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); gaussianPyramid(im1, 8, 0.5);; auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << 
fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //SIFT rounds = 1; cpu_duration = 0; gpu_duration = 0; printf("SIFT\n"); //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); int lesser = min(img_temp.rows, img_temp.cols); Mat img_sqtmp = fdirectResize(img_temp,lesser,lesser); det_file << "SIFT" << endl; avg_file << "SIFT" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::mySift(img_sqtmp); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); mySift(img_sqtmp); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); avg_file.close(); det_file.close(); allc_file << endl; allg_file << endl; spd_file << endl; thc_file << endl; thg_file << endl; } allc_file.close(); allg_file.close(); spd_file.close(); thc_file.close(); thg_file.close(); return 0; }
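// Hedged addendum, not part of the original file: the first GPU call in each timed
// section above also pays one-time runtime/context initialization, so that startup
// cost is folded into round 1 of the GPU averages. The sources hint at a warm-up
// (a commented-out cudaLagSetup() appears in the CUDA variant of this benchmark);
// an explicit untimed warm-up call before each timed loop, as sketched below, would
// keep it out of the numbers. rgb2Gray is simply the first wrapper this benchmark
// times; any GPU call would do.
/*
rgb2Gray(im1);                                   // untimed warm-up, result discarded
for (int runs = 1; runs <= rounds; runs++){
    auto t3 = std::chrono::high_resolution_clock::now();
    rgb2Gray(im1);                               // only the steady-state cost is timed now
    auto t4 = std::chrono::high_resolution_clock::now();
    gpu_duration += std::chrono::duration_cast<std::chrono::microseconds>(t4 - t3).count();
}
*/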
59c63f85417d41c66f405ce682b46e92425bc201.cu
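/* Hedged sketch, not part of the original file: the commented-out host wrappers below
   (cudaRgb2Gray, cudaReverse, cudaGammaCorrection, ...) share one chunking scheme
   driven by IMG_CHUNK (half of a 1920x1080x3 frame): the image is processed in bands
   of chunkRows = IMG_CHUNK / cols rows rather than all at once. The skeleton below only
   makes that shared structure explicit -- chunkedLaunch and processKernel are
   hypothetical names, and unlike some wrappers (which upload the whole source once and
   chunk only the output) this sketch chunks both sides.

static void chunkedLaunch(uchar* input, uchar* output, int rows, int cols){
    int chunkRows = IMG_CHUNK / cols;                  // rows per band
    if (chunkRows == 0) chunkRows = rows;              // small image: one band
    int offset = 0;                                    // element offset into the image
    int bands = (rows + chunkRows - 1) / chunkRows;
    for (int step = 0; step < bands; step++){
        int n = min(3 * chunkRows * cols, 3 * rows * cols - offset);
        if (n <= 0) break;                             // nothing left in the last band
        uchar *dSrc, *dDst;
        cudaMalloc(&dSrc, n * sizeof(uchar));
        cudaMalloc(&dDst, n * sizeof(uchar));
        cudaMemcpy(dSrc, input + offset, n * sizeof(uchar), cudaMemcpyHostToDevice);
        int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        processKernel<<<blocks, THREADS_PER_BLOCK>>>(dDst, dSrc, n);   // hypothetical kernel
        cudaMemcpy(output + offset, dDst, n * sizeof(uchar), cudaMemcpyDeviceToHost);
        cudaFree(dSrc);
        cudaFree(dDst);
        offset += n;
    }
}
*/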
#include "ImgProc.hpp" #include "GpuProc.cu" #include <stdio.h> #include <opencv2\\core\core.hpp> #include <opencv2\highgui\highgui.hpp> #include <iostream> #include <fstream> #include <cuda.h> #include <cuda_runtime.h> #include <ctime> #include <chrono> using namespace cv; using namespace std; #define FILTER_SIZE 3 #define M_PI 3.14159265358979323846 /* pi */ #define IMG_CHUNK 3110400 /* (1920 x 1080 x 3) / 2 */ #define THREADS_PER_BLOCK 256 //Boat from https://homepages.cae.wisc.edu/~ece533/images/boat.png; //-- Cuda Device Code /*__global__ void rgb2GrayKernel(uchar* dest_data, uchar* src_data, int rows, int cols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; src_data += 3 * offset; if (idx < rows * cols){ dest_data[idx] = 0.299 * src_data[3 * idx + 2] + 0.587 * src_data[3 * idx + 1] + 0.114 * src_data[3 * idx]; } }*/ /*__global__ void reverseKernel(uchar* dest_data, uchar* src_data, int srcN, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx + (offset * 3) < srcN){ dest_data[idx] = 255 - src_data[idx + (offset * 3)]; } }*/ /*__global__ void gammaCorrectionKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, double gamma, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double gammaCorrect = 1.00 / gamma; if (idx + offset < 3 * srcRows * srcCols){ double color = (double)src_data[idx + offset]; uchar val = 255 * pow((color / 255), gammaCorrect); dest_data[idx] = val; } }*/ /*__global__ void directResizeKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, int destRows, int destCols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double rRow = (double)srcRows / destRows; double rCol = (double)srcCols / destCols; if (idx + offset < 3 * destRows * destCols){ int sRow = (((idx + offset) / 3) / destCols) * rRow; int sCol = (((idx + offset) / 3) % destCols) * rCol; dest_data[idx] = src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; } }*/ /*__global__ void linearResizeKernel(uchar* dest_data, uchar* src_data, int srcRows, int srcCols, int destRows, int destCols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; double rRow = (double)srcRows / destRows; double rCol = (double)srcCols / destCols; if (idx < 3 * destRows * destCols){ double dsRow = (((offset + idx) / 3) / destCols) * rRow; double dsCol = (((offset + idx) / 3) % destCols) * rCol; int sRow = (int)dsRow; int sCol = (int)dsCol; double deltaI = dsRow - sRow; double deltaJ = dsCol - sCol; if (deltaI + deltaJ < 0.0000000001){ dest_data[idx] = src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; } else{ uchar val = 0; double area1 = (1 - deltaI) * (1 - deltaJ); double area2 = deltaI * (1 - deltaJ); double area3 = deltaI * deltaJ; double area4 = (1 - deltaI) * deltaJ; val += area1 * src_data[3 * (sRow * srcCols + sCol) + (idx + offset) % 3]; val += area2 * src_data[3 * ((sRow + 1) * srcCols + sCol) + (idx + offset) % 3]; val += area3 * src_data[3 * ((sRow + 1) * srcCols + sCol + 1) + (idx + offset) % 3]; val += area4 * src_data[3 * (sRow * srcCols + sCol + 1) + (idx + offset) % 3]; dest_data[idx] = val; } } }*/ /*__global__ void gaussianFilterKernel(uchar* dest_data, uchar* src_data, double* gKernel, int filter_size, int rows, int cols, int chunkRows, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = ((idx + offset) / 3) / cols; int j = ((idx + offset) / 3) % cols; int maxk = min(filter_size, rows - i); int mink = 
min(filter_size, i); int maxl = min(filter_size, cols - j); int minl = min(filter_size, j); uchar tmp = 0; for (int k = -mink; k <= maxk; k++){ for (int l = -minl; l <= maxl; l++){ tmp += *(gKernel + (k + filter_size) * (2 * filter_size + 1) + (l + filter_size)) * src_data[(idx + offset) + 3 * (k * cols + l)]; } } dest_data[idx] = tmp; //} }*/ /*__global__ void sobelGradientKernel(short* temp_data, uchar* src_data, int* sobel_x, int* sobel_y, double* rangeMin, double* rangeMax, int srcRows, int srcCols, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = ((idx + offset) / 3) / srcCols; int j = ((idx + offset) / 3) % srcCols; float tmpx = 0; float tmpy = 0; int maxk = min(1, srcRows - i); int mink = min(1, i); int maxl = min(1, srcCols - j); int minl = min(1, j); for (int k = -mink; k <= maxk; k++){ for (int l = -minl; l <= maxl; l++){ //if (k > 1 || k < -1 || l > 1 || l < -1) //printf("Error in Sobel!\n"); tmpx = tmpx + sobel_x[3 * (k + 1) + (l + 1)] * src_data[(idx + offset) + 3 * (k * srcCols + l)]; tmpy = tmpy + sobel_y[3 * (k + 1) + (l + 1)] * src_data[(idx + offset) + 3 * (k * srcCols + l)]; } } int value = sqrt((tmpx*tmpx) + (tmpy*tmpy)); //printf("Value: %d\n",value); temp_data[idx] = value; }*/ /*__global__ void sobelRangeKernel(uchar* dest_data, short* temp_data, double rangeMin, double rangeMax, double thresh_min, double thresh_max, int offset){ int idx = blockIdx.x * blockDim.x + threadIdx.x; dest_data[idx] = 255; int value = 255 * ((temp_data[idx] + rangeMin) / (rangeMax - rangeMin)); //int value = 255 * ((temp_data[idx + offset] + rangeMin) / (rangeMax - rangeMin)); if (value >= thresh_max){ value = 255; } else if (value < thresh_min){ value = 0; } dest_data[idx] = value; }*/ //__global__ void kMeansCountingKernel(uchar* src_data, uchar* k_index, int* k_count, int* hits, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols, int iter){ /*__global__ void kMeansCountingKernel(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; if (i >= srcRows) return; float b2 = src_data[3 * (i * srcCols + j)]; float g2 = src_data[3 * (i * srcCols + j) + 1]; float r2 = src_data[3 * (i * srcCols + j) + 2]; float min_dist = FLT_MAX; uchar new_index = k_index[i * srcCols + j]; for (int group = 0; group < k_means; group++){ float val = 0; val += (b2 - k_colors[3 * group]) * (b2 - k_colors[3 * group]); val += (g2 - k_colors[3 * group + 1]) * (g2 - k_colors[3 * group + 1]); val += (r2 - k_colors[3 * group + 2]) * (r2 - k_colors[3 * group + 2]); float dist = sqrtf(val); if (dist < min_dist){ min_dist = dist; new_index = group; } } if (k_index[i * srcCols + j] != new_index){ k_index[i * srcCols + j] = new_index; //printf("New Index: %d", new_index); //atomicAdd((hits + new_index), 1); //if (iter > 60) // printf(" (%d, %d) \n",i,j); convergence[0] = false; } atomicAdd((k_count + new_index), 1); }*/ /*__global__ void kMeansCountingKernelOld(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, bool* convergence, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; float b2 = src_data[3 * (i * srcCols + j)]; float g2 = src_data[3 * (i * srcCols + j) + 1]; float r2 = src_data[3 * (i * srcCols + j) + 2]; float min_dist = FLT_MAX; uchar new_index; for (int group = 0; group < k_means; group++){ float b1 = 
k_colors[3 * group]; float g1 = k_colors[3 * group + 1]; float r1 = k_colors[3 * group + 2]; float dist = std::sqrt(pow(r2 - r1, 2) + pow(g2 - g1, 2) + pow(b2 - b1, 2)); //Combination of pow and sqrt is too much //float dist = 0; if (dist < min_dist){ min_dist = dist; //k_index[i * srcCols + j] = group; new_index = group; } } if (k_index[i * srcCols + j] != new_index){ k_index[i * srcCols + j] = new_index; //printf("New Index: %d", new_index); *convergence = false; } //k_count[new_index] += 1; atomicAdd((k_count + new_index), 1); }*/ /*__global__ void kMeansGroupAdjustKernel(uchar* src_data, uchar* k_index, int* k_count, float* k_colors, int k_means, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; int group = k_index[i * srcCols + j]; for (int color = 0; color < 3; color++){ float src_val = src_data[3 * (i * srcCols + j) + color]; int group_count = k_count[group]; float val = src_val / group_count; //k_colors[3 * group + color] += val; atomicAdd((k_colors + (3 * group + color)),val); } }*/ /*__global__ void kMeansOutputKernel(uchar* dest_data, uchar* k_index, float* k_colors, int srcRows, int srcCols){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i = (idx) / srcCols; int j = (idx) % srcCols; int group = k_index[i * srcCols + j]; for (int color = 0; color < 3; color++){ dest_data[3 * (i * srcCols + j) + color] = (uchar)k_colors[3 * group + color]; } }*/ //-- Cuda Host Code /*void cudaRgb2Gray(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); //int destN = min(1920 * 1080, srcRows * srcCols); for (int step = 0; step < rounds; step++){ int destN = min(chunkRows * srcCols, srcRows * srcCols - offset); if (destN <= 0) break; blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); rgb2GrayKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, chunkRows, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN; } cudaFree(deviceSrcData); }*/ /*void cudaReverse(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); //printf("Rounds: %d \n", rounds); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - (offset * 3)); if (destN <= 0){ //printf("Broken!\n"); break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); reverseKernel <<<blocks, threadsPerBlock >>>(deviceDestData, deviceSrcData, srcN, chunkRows, offset); cudaMemcpy(output + (3 * offset), deviceDestData, destN*sizeof(uchar), 
cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN / 3; } cudaFree(deviceSrcData); }*/ /*void cudaGammaCorrection(uchar* input, uchar* output, double gamma, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); gammaCorrectionKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, gamma, chunkRows, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN; } cudaFree(deviceSrcData); }*/ /*void cudaDirectResize(uchar* input, uchar* output, int srcRows, int srcCols, int destRows, int destCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / destCols; if (chunkRows == 0){ chunkRows = destRows; } int rounds = ceil(destRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * destCols, 3 * destRows * destCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); directResizeKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, destRows, destCols, chunkRows, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN; } cudaFree(deviceSrcData); }*/ /*void cudaLinearResize(uchar* input, uchar* output, int srcRows, int srcCols, int destRows, int destCols){ uchar* deviceSrcData; uchar* deviceDestData; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / destCols; if (chunkRows == 0){ chunkRows = destRows; } int rounds = ceil(destRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * destCols, 3 * destRows * destCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); linearResizeKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, srcRows, srcCols, destRows, destCols, chunkRows, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN; } cudaFree(deviceSrcData); }*/ /*void cudaGaussianFilter(uchar* input, uchar* output, double gKernel[][2 * FILTER_SIZE + 1], int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; double* deviceFilter; int 
threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int srcN = 3 * srcRows * srcCols; cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMalloc(&deviceFilter, (2 * FILTER_SIZE + 1) * (2 * FILTER_SIZE + 1) * sizeof(double)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); cudaMemcpy(deviceFilter, gKernel, (2 * FILTER_SIZE + 1)*(2 * FILTER_SIZE + 1)*sizeof(double), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); gaussianFilterKernel << <blocks, threadsPerBlock >> > (deviceDestData, deviceSrcData, deviceFilter, FILTER_SIZE, srcRows, srcCols, chunkRows, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); offset += destN; } cudaFree(deviceSrcData); cudaFree(deviceFilter); }*/ /*void cudaSobelFilter(uchar* input, uchar* output, int srcRows, int srcCols){ uchar* deviceSrcData; uchar* deviceDestData; short* deviceTempData; int* deviceSobel_x; int* deviceSobel_y; int srcN = 3 * srcRows * srcCols; double* deviceRangeMin; double* deviceRangeMax; double rangeMin[1] = { 0.0 }; double rangeMax[1] = { 0.0 }; int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; int sobel_x[9], sobel_y[9]; sobel_x[0] = -1; sobel_x[1] = 0; sobel_x[2] = 1; sobel_x[3] = -2; sobel_x[4] = 0; sobel_x[5] = 2; sobel_x[6] = -1; sobel_x[7] = 0; sobel_x[8] = 1; sobel_y[0] = -1; sobel_y[1] = -2; sobel_y[2] = -1; sobel_y[3] = 0; sobel_y[4] = 0; sobel_y[5] = 0; sobel_y[6] = 1; sobel_y[7] = 2; sobel_y[8] = 1; //int threadsPerBlock = 512; //blocks = (srcN + threadsPerBlock - 1) / threadsPerBlock; //printf("Blocks: %d\n", blocks); cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMalloc(&deviceSobel_x, 9 * sizeof(int)); cudaMalloc(&deviceSobel_y, 9 * sizeof(int)); cudaMalloc(&deviceRangeMin, sizeof(double)); cudaMalloc(&deviceRangeMax, sizeof(double)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); cudaMemcpy(deviceSobel_x, sobel_x, 9 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceSobel_y, sobel_y, 9 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceRangeMin, rangeMin, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceRangeMax, rangeMax, sizeof(double), cudaMemcpyHostToDevice); chunkRows = IMG_CHUNK / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } int rounds = ceil(srcRows / (double)chunkRows); short* temp_data = new short[3 * srcRows * srcCols]; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceTempData, destN*sizeof(short)); sobelGradientKernel << <blocks, threadsPerBlock >> > (deviceTempData, deviceSrcData, deviceSobel_x, deviceSobel_y, deviceRangeMin, deviceRangeMax, srcRows, srcCols, offset); cudaMemcpy(temp_data + offset, deviceTempData, destN*sizeof(short), cudaMemcpyDeviceToHost); cudaFree(deviceTempData); offset += destN; } cudaFree(deviceSrcData); cudaFree(deviceSobel_x); cudaFree(deviceSobel_y); cudaFree(deviceRangeMin); cudaFree(deviceRangeMax); //printf("Works!\n"); 
//cudaMemcpy(rangeMin, deviceRangeMin, sizeof(double), cudaMemcpyDeviceToHost); //cudaMemcpy(rangeMax, deviceRangeMax, sizeof(double), cudaMemcpyDeviceToHost); //printf("Host temp data done"); for (int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ double value = temp_data[3 * (i * srcCols + j) + color];; rangeMin[0] = std::min(value, rangeMin[0]); rangeMax[0] = std::max(value, rangeMax[0]); } } } //printf("Got here!\n"); //output = (uchar*)temp_data; //return; //printf("Range Min: %f, Range Max: %f \n", rangeMin[0], rangeMax[0]); //blocks = (srcN + threadsPerBlock - 1) / threadsPerBlock; offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = (destN + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceTempData, destN*sizeof(short)); cudaMemcpy(deviceTempData, temp_data + offset, destN*sizeof(short), cudaMemcpyHostToDevice); cudaMalloc(&deviceDestData, destN*sizeof(uchar)); sobelRangeKernel << <blocks, threadsPerBlock >> >(deviceDestData, deviceTempData, rangeMin[0], rangeMax[0], 20, 60, offset); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); cudaFree(deviceTempData); offset += destN; } //uchar minThresh = 20; //uchar maxThresh = 60; /*for (int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j) + color; int value = 255 * ((temp_data[idx] + rangeMin[0])/(rangeMax[0] - rangeMin[0])); if (value >= maxThresh){ value = 255; } else if (value < minThresh){ value = 0; } output[idx] = value; } } } //cudaFree(deviceTempData); //cudaFree(deviceDestData); delete[] temp_data; }*/ /*void cudaKMeans(uchar* input, uchar* output, int srcRows, int srcCols, int k_means){ int threadsPerBlock = THREADS_PER_BLOCK; int blocks = 0; int chunkRows = 0; int offset = 0; uchar* deviceSrcData; uchar* deviceDestData; float* device_k_colors; int* device_k_count; //int* device_hits; uchar* device_k_index; bool* device_convergence; float* k_colors = new float[k_means * 3]; uchar* k_index = new uchar[srcRows * srcCols]; int* k_count = new int[k_means]; //* hits = new int[k_means]; int srcN = srcRows * srcCols * 3; for (int pix = 0; pix < k_means; pix++){ int i = rand() % srcRows; int j = rand() % srcCols; for (int color = 0; color < 3; color++){ k_colors[3 * pix + color] = input[3 * (i * srcCols + j) + color]; } } cudaMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); cudaMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), cudaMemcpyHostToDevice); //printf("=== START ===\n"); //for (int group = 0; group < k_means; group++){ //printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); //} bool convergence[1] = { false }; for (int k = 0; k < srcRows * srcCols; k++){ k_index[k] = 0; } chunkRows = (IMG_CHUNK * 0.5) / srcCols; if (chunkRows == 0){ chunkRows = srcRows; } cudaMalloc(&device_k_count, (k_means)*sizeof(int)); //cudaMalloc(&device_hits, (k_means)*sizeof(int)); //cudaMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); cudaMalloc(&device_convergence, sizeof(bool)); //cudaMalloc(&device_k_index, (srcRows * srcCols)*sizeof(uchar)); //cudaMemcpy(device_k_index, k_index, srcRows * srcCols *sizeof(uchar), cudaMemcpyHostToDevice); int count = 0; while (!convergence[0]){ convergence[0] = true; cudaMemcpy(device_convergence, 
convergence, sizeof(bool), cudaMemcpyHostToDevice); for (int k = 0; k < k_means; k++){ k_count[k] = 0; //hits[k] = 0; } cudaMemcpy(device_k_count, k_count, k_means * sizeof(int), cudaMemcpyHostToDevice); //cudaMemcpy(device_hits, hits, k_means * sizeof(int), cudaMemcpyHostToDevice); //printf("Count: %d\n",count); int rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN/3) + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceSrcData, destN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input + offset, destN*sizeof(uchar), cudaMemcpyHostToDevice); cudaMalloc(&device_k_index, destN*sizeof(uchar)/3); cudaMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, cudaMemcpyHostToDevice); //kernel //kMeansCountingKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_hits, device_k_colors, device_convergence, k_means, srcRows, srcCols,count); kMeansCountingKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_k_colors, device_convergence, k_means, srcRows, srcCols); cudaMemcpy(k_index + (offset / 3), device_k_index, destN*sizeof(uchar) / 3, cudaMemcpyDeviceToHost); cudaFree(deviceSrcData); cudaFree(device_k_index); offset += destN; } cudaMemcpy(k_count, device_k_count, (k_means)*sizeof(int), cudaMemcpyDeviceToHost); //cudaMemcpy(hits, device_hits, (k_means)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(convergence, device_convergence, sizeof(bool), cudaMemcpyDeviceToHost); //printf("Group Count, step %d::\n",count); //for (int i = 0; i < k_means; i++){ // printf("Group %d: %d\n",i,k_count[i]); //} //convergence[0] = true; //Stopper //if (convergence[0]) //RE-ENABLE WHEN ACTUALLY USING //break; if (count == 200){ //printf("Stopped at %d!\n",count); break; } count++; for (int k = 0; k < 3 * k_means; k++){ k_colors[k] = 0; } cudaMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), cudaMemcpyHostToDevice); rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN / 3) + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceSrcData, destN*sizeof(uchar)); cudaMemcpy(deviceSrcData, input + offset, destN*sizeof(uchar), cudaMemcpyHostToDevice); cudaMalloc(&device_k_index, destN*sizeof(uchar) / 3); cudaMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, cudaMemcpyHostToDevice); kMeansGroupAdjustKernel << <blocks, threadsPerBlock >> > (deviceSrcData, device_k_index, device_k_count, device_k_colors, k_means, srcRows, srcCols); cudaFree(deviceSrcData); cudaFree(device_k_index); offset += destN; } cudaMemcpy(k_colors, device_k_colors, 3 * k_means * sizeof(float), cudaMemcpyDeviceToHost); //kernel } int rounds = ceil(srcRows / (float)chunkRows); offset = 0; for (int step = 0; step < rounds; step++){ int destN = min(3 * chunkRows * srcCols, 3 * srcRows * srcCols - offset); if (destN <= 0){ break; } blocks = ((destN / 3) + threadsPerBlock - 1) / threadsPerBlock; cudaMalloc(&deviceDestData, destN*sizeof(uchar)); cudaMalloc(&device_k_index, destN*sizeof(uchar) / 3); cudaMemcpy(device_k_index, k_index + (offset / 3), destN*sizeof(uchar) / 3, cudaMemcpyHostToDevice); //kernel kMeansOutputKernel << <blocks, threadsPerBlock >> > (deviceDestData, device_k_index, 
device_k_colors, srcRows, srcCols); cudaMemcpy(output + offset, deviceDestData, destN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceDestData); cudaFree(device_k_index); offset += destN; } //printf("Count: %d\n", count); cudaFree(device_k_colors); cudaFree(device_k_count); cudaFree(device_convergence); delete[] k_colors; delete[] k_index; delete[] k_count; }*/ /*void cudaKMeansOld(uchar* input, uchar* output, int srcRows, int srcCols, int k_means){ int threadsPerBlock = 512; int blocks = ((srcRows * srcCols) + threadsPerBlock - 1) / threadsPerBlock; uchar* deviceSrcData; uchar* deviceDestData; float* device_k_colors; int* device_k_count; uchar* device_k_index; bool* device_convergence; //int srcN = 3 * srcRows * srcCols; int srcN = min(3 * 1920 * 1080, 3 * srcRows * srcCols); cudaMalloc(&deviceSrcData, srcN*sizeof(uchar)); cudaMalloc(&deviceDestData, srcN*sizeof(uchar)); cudaMalloc(&device_k_colors, (3 * k_means)*sizeof(float)); cudaMalloc(&device_k_index, (srcRows * srcCols)*sizeof(uchar)); cudaMalloc(&device_k_count, (k_means)*sizeof(int)); cudaMalloc(&device_convergence, sizeof(bool)); cudaMemcpy(deviceSrcData, input, srcN*sizeof(uchar), cudaMemcpyHostToDevice); float* k_colors = new float[3 * k_means]; uchar* k_index = new uchar[srcRows * srcCols]; int* k_count = new int[k_means]; for (int pix = 0; pix < k_means; pix++){ int i = rand() % srcRows; int j = rand() % srcCols; for (int color = 0; color < 3; color++){ k_colors[3 * pix + color] = input[3 * (i * srcCols + j) + color]; } } cudaMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), cudaMemcpyHostToDevice); printf("=== START ===\n"); for (int group = 0; group < k_means; group++){ printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); } bool convergence[1] = { false }; for (int k = 0; k < srcRows * srcCols; k++){ k_index[k] = 0; } cudaMemcpy(device_k_index, k_index, srcRows * srcCols *sizeof(uchar), cudaMemcpyHostToDevice); int count = 0; while (!convergence[0]){ convergence[0] = true; cudaMemcpy(device_convergence, convergence, sizeof(bool), cudaMemcpyHostToDevice); for (int k = 0; k < k_means; k++){ k_count[k] = 0; } cudaMemcpy(device_k_count, k_count, k_means * sizeof(int), cudaMemcpyHostToDevice); kMeansCountingKernelOld<<<blocks,threadsPerBlock>>> (deviceSrcData,device_k_index,device_k_count,device_k_colors, device_convergence, k_means,srcRows,srcCols); cudaMemcpy(k_index, device_k_index, (srcRows*srcCols)*sizeof(uchar), cudaMemcpyDeviceToHost); cudaMemcpy(k_count, device_k_count, (k_means)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(convergence, device_convergence, sizeof(bool), cudaMemcpyDeviceToHost); if (count == 400){ printf("Stopped at 400!\n"); break; } count++; //printf("Bogey::\n"); if (convergence[0]) break; for (int k = 0; k < 3 * k_means; k++){ k_colors[k] = 0; } cudaMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), cudaMemcpyHostToDevice); //kMeansGroupAdjustKernel<<<blocks,threadsPerBlock>>> (deviceSrcData,device_k_index,device_k_count,device_k_colors,k_means,srcRows,srcCols); cudaMemcpy(k_colors, device_k_colors, 3 * k_means * sizeof(float), cudaMemcpyDeviceToHost); } cudaMemcpy(device_k_colors, k_colors, 3 * k_means *sizeof(float), cudaMemcpyHostToDevice); printf("=== END ===\n"); //for (int group = 0; group < k_means; group++){ //printf("Color Group %d: R=%f, G=%f, B=%f \n", group + 1, k_colors[3 * group + 2], k_colors[3 * group + 1], k_colors[3 * group]); //} 
kMeansOutputKernel<<<blocks,threadsPerBlock>>> (deviceDestData,device_k_index,device_k_colors,srcRows,srcCols); cudaMemcpy(output, deviceDestData, srcN*sizeof(uchar), cudaMemcpyDeviceToHost); cudaFree(deviceSrcData); cudaFree(deviceDestData); cudaFree(device_k_colors); cudaFree(device_k_index); cudaFree(device_k_count); cudaFree(device_convergence); delete[] k_colors; delete[] k_index; delete[] k_count; }*/ //-- OpenCV Handling /*Mat rgb2Gray(Mat image){ Mat out = Mat(image.rows, image.cols, CV_8UC1); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaRgb2Gray(input, output, image.rows, image.cols); return out; }*/ /*Mat reverse(Mat image){ Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaReverse(input, output, image.rows, image.cols); return out; }*/ /*Mat gammaCorrection(Mat image, double gamma){ Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaGammaCorrection(input, output, gamma, image.rows, image.cols); return out; }*/ /*Mat directResize(Mat image, int rows, int cols){ Mat out = Mat(rows, cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaDirectResize(input, output, image.rows, image.cols, rows, cols); return out; }*/ /*Mat linearResize(Mat image, int rows, int cols){ Mat out = Mat(rows, cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaLinearResize(input, output, image.rows, image.cols, rows, cols); return out; }*/ /*void createFilter(double gKernel[][2 * FILTER_SIZE + 1], double inputSigma){ //standard deviation to 1.0 double sigma = inputSigma; double r, s = 2.0 * sigma * sigma; double sum = 0.0; for (int x = -FILTER_SIZE; x <= FILTER_SIZE; x++){ for (int y = -FILTER_SIZE; y <= FILTER_SIZE; y++){ r = sqrt(x*x + y*y); gKernel[x + FILTER_SIZE][y + FILTER_SIZE] = exp(-(r*r) / s) / (M_PI * s); sum += gKernel[x + FILTER_SIZE][y + FILTER_SIZE]; } } for (int i = 0; i < 2 * FILTER_SIZE + 1; ++i){ for (int j = 0; j < 2 * FILTER_SIZE + 1; ++j){ gKernel[i][j] /= sum; } } }*/ /*Mat gaussianFilter(Mat image, double sigma){ double gKernel[2 * FILTER_SIZE + 1][2 * FILTER_SIZE + 1]; createFilter(gKernel, sigma); Mat out = Mat(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaGaussianFilter(input,output,gKernel,image.rows,image.cols); //delete[] gKernel; return out; //return image; }*/ /*Mat sobelFilter(Mat image){ Mat out(image.rows, image.cols, image.type()); //Mat temp(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaSobelFilter(input,output,image.rows,image.cols); return out; }*/ /*Mat kMeans(Mat image, int k_means){ srand(6000); if (k_means > 256){ printf("Error: Max number of groups exceeded (256)\n"); exit(-1); } Mat out(image.rows, image.cols, image.type()); uchar* input = (uchar*)image.datastart; uchar* output = (uchar*)out.datastart; cudaKMeans(input,output,image.rows,image.cols,k_means); return out; }*/ /*Mat gaussianPyramid(cv::Mat image, uchar levels, float scale){ if (scale > 0.5){ printf("Error: Scale > 0.5\n"); exit(-1); } int srcRows = image.rows; int srcCols = image.cols; Mat output(srcRows + (srcRows * scale + 1), srcCols, image.type()); uchar* src_data = (uchar*)image.datastart; uchar* dest_data = (uchar*)output.datastart; for 
(int i = 0; i < srcRows; i++){ for (int j = 0; j < srcCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j) + color; dest_data[idx] = src_data[idx]; } } } dest_data += srcRows * srcCols * 3; int newRows = srcRows * scale; int newCols = srcCols * scale; float newScale = scale; int offset = 0; for (int level = 1; level < levels; level++){ image = gaussianFilter(image, 1.0); image = linearResize(image, newRows, newCols); src_data = (uchar*)image.datastart; for (int i = 0; i < newRows; i++){ for (int j = 0; j < newCols; j++){ for (int color = 0; color < 3; color++){ int idx = 3 * (i * srcCols + j + offset) + color; dest_data[idx] = src_data[3 * (i * newCols + j) + color]; } } } offset += newCols; newRows *= scale; newCols *= scale; newScale *= scale; } return output; }*/ //-- Main void simulate(cv::Mat& im){ //im = frgb2Gray(im); img_proc::mySift(im); mySift(im); } int main(void){ // [MAIN] //printf("%d\n", (unsigned int)powf(2, 2) & 4); //img_proc::makeFilter(1.7148); //printf("Done!\n"); //getchar(); //return; cv::Mat im, im0, im1; im = imread("D://School//Summer 2016//Research//nausicaa.jpg"); im = img_proc::linearResize(im, 640, 640); //im = img_proc::frgb2Gray(im); //im = imread("D://School//Summer 2016//Research//Stereo//storage//z_im0.png"); //im0 = imread("D://School//Summer 2016//Research//kMeans//nausicaa_float_cpu.png"); //im0 = img_proc::frgb2Gray(im0); //im1 = imread("D://School//Summer 2016//Research//kMeans//nausicaa_float_gpu.png"); //im1 = img_proc::frgb2Gray(im1); //im = imread("D://School//Summer 2016//Research//bike.jpg"); //im = imread("D://School//Summer 2016//Research//gray//einstein_gray.png"); //im0 = imread("D://School//Summer 2016//Research//Stereo//storage//ims0.png"); //im1 = imread("D://School//Summer 2016//Research//Stereo//storage//ims1.png"); if (im.empty()){ printf("Error!\n"); getchar(); return -1; } vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); int versionFish = 0; //img_proc::diff_count(im0, im1); //im = img_proc::frgb2Gray(im); //im = img_proc::fGaussianFilterSep(im, 1.6); //im = img_proc::mySift(im); //im = fGaussianFilterSep(im, 1.6); //im = reverse(im); //im = img_proc::mySift(im); //im = myConv2(im, filter, 0); //im1 = kMeansFixed(im, 4); //im = img_proc::linearResize(im, im.rows / 4, im.cols / 4); //im0 = imread("D://School//Summer 2016//Research//mySift//audrey_conv_cpu.png"); //im1 = imread("D://School//Summer 2016//Research//mySift//audrey_conv_gpu.png"); //im0 = img_proc::frgb2Gray(im0); //im1 = img_proc::frgb2Gray(im1); //im = img_proc::diff_count(im0, im1); //imwrite("D://School//Summer 2016//Research//mySift//reverse.png", im, compression_params); //imwrite("D://School//Summer 2016//Research//Stereo//storage//z_im1.png", im, compression_params); //im0 = img_proc::frgb2Gray(im0); //im1 = img_proc::frgb2Gray(im1); //img_proc::diff_count(im0, im1); //im = img_proc::depthFromStereo(im0, im1, 0, 0, 0); //im = img_proc::kMeans(im, 8); //im = kMeans(im, 8); //im = img_proc::kMeansFixed(im, 8); //imwrite("D://School//Summer 2016//Research//kMeans//nausicaa_float_gpu.png", im, compression_params); //cudaRuntimeGetVersion(&versionFish); //printf("Version: %f\n", CV_VERSION); //cout << CV_VERSION << endl; //Vec3b intensity = im.at<Vec3b>(20, 20); //printf("rows: %d, columns: %d, %d \n", im.rows, im.cols,intensity.val[2]); //get_nvtAttrib("Bike", 0xFF222222); //img_proc::mySift(im); //mySift(im); //nvtxRangePop(); //Mat resized = 
gaussianFilter(im,2.0); //Mat resized = img_proc::kMeans(im,4); //Mat temp = frgb2Gray(im); //Mat resized2 = fdirectResize(temp, temp.rows / 4, temp.cols / 4); //Mat resized = img_proc::fdirectResize(temp, temp.rows / 4, temp.cols / 4); //auto time1 = std::chrono::high_resolution_clock::now(); //Mat resized = img_proc::mySift(im); //nvtxEventAttributes_t eventAttrib = get_nvtAttrib("Fish", 0xFF880000); //nvtxMarkEx(&eventAttrib); //eventAttrib.message.ascii = "Fish"; //eventAttrib.color = 0xFF880000; //nvtxRangePushEx(&eventAttrib); //Mat resized, resized2; //cudaLagSetup(); //im = imread("D://School//Summer 2016//Research//audrey.jpg"); //get_nvtAttrib("Audrey", 0xFF222222); //simulate(im); //nvtxRangePop(); /*im = imread("D://School//Summer 2016//Research//einstein.png"); get_nvtAttrib("Einstein", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//boat.png"); get_nvtAttrib("Boat", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//bike.jpg"); get_nvtAttrib("Bike", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//castle.png"); get_nvtAttrib("Castle", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//lena.png"); get_nvtAttrib("Lena", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//valve.png"); get_nvtAttrib("Valve", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//koala.png"); get_nvtAttrib("Koala", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//audrey.jpg"); get_nvtAttrib("Audrey", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop(); im = imread("D://School//Summer 2016//Research//nausicaa.jpg"); get_nvtAttrib("Nausicaa", 0xFF222222); //im = frgb2Gray(im); //img_proc::fgaussianFilter(im, 2.0); //fGaussianFilter(im, 2.0); simulate(im); nvtxRangePop();*/ //********// //printf("Done!\n"); //getchar(); //return 0; //********// ofstream avg_file, det_file, allc_file, allg_file, spd_file, thc_file, thg_file; uchar micro = 181; string name = ""; double filesize = 0; allc_file.open("D://School//Summer 2016//Research//Textfiles//averages__all_cpu.csv"); allc_file << micro << "s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; allg_file.open("D://School//Summer 2016//Research//Textfiles//averages__all_gpu.csv"); allg_file << micro << "s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; spd_file.open("D://School//Summer 2016//Research//Textfiles//averages__speedups.csv"); spd_file 
<< "CPU/GPU, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; thc_file.open("D://School//Summer 2016//Research//Textfiles//averages__throughput_cpu.csv"); thc_file << "MB/s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; thg_file.open("D://School//Summer 2016//Research//Textfiles//averages__throughput_gpu.csv"); thg_file << "MB/s, rgb2Gray,reverse,gamma correction,direct resize(x2),direct resize(x0.5),linear resize(x2),linear resize(x0.5),gaussian filter,edge detection,k-means,gaussian pyramid" << endl; //printf("Reading started... "); for (int pics = 0; pics <= 8; pics++){ Mat im1; if (pics == 0){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_einstein.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_einstein.txt"); im1 = imread("D://School//Summer 2016//Research//einstein.png"); name = "einstein"; } else if (pics == 1){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_castle.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_castle.txt"); im1 = imread("D://School//Summer 2016//Research//castle.png"); name = "castle"; } else if (pics == 2){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_lena.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_lena.txt"); im1 = imread("D://School//Summer 2016//Research//lena.png"); name = "lena"; } else if (pics == 3){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_boat.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_boat.txt"); im1 = imread("D://School//Summer 2016//Research//boat.png"); name = "boat"; } else if (pics == 4){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_bike.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_bike.txt"); im1 = imread("D://School//Summer 2016//Research//bike.jpg"); name = "bike"; } else if (pics == 5){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_valve.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_valve.txt"); im1 = imread("D://School//Summer 2016//Research//valve.png"); name = "valve"; } else if (pics == 6){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_koala.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_koala.txt"); im1 = imread("D://School//Summer 2016//Research//koala.png"); name = "koala";//[STOP HERE] } else if (pics == 7){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_nausicaa.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_nausicaa.txt"); im1 = imread("D://School//Summer 2016//Research//nausicaa.jpg"); name = "nausicaa"; } else if (pics == 8){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_audrey.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_audrey.txt"); im1 = imread("D://School//Summer 2016//Research//audrey.jpg"); name = "oranges"; } else if (pics == 9){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_oranges.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_oranges.txt"); im1 = imread("D://School//Summer 2016//Research//oranges.jpg"); name = "oranges"; } else if 
(pics == 10){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_mountains.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_mountains.txt"); im1 = imread("D://School//Summer 2016//Research//mountains.jpg"); name = "mountains"; } else if (pics == 11){ avg_file.open("D://School//Summer 2016//Research//Textfiles//averages_tiger.txt"); det_file.open("D://School//Summer 2016//Research//Textfiles//details_tiger.txt"); im1 = imread("D://School//Summer 2016//Research//tiger.jpg"); name = "tiger"; } else{ printf("Error!\n"); exit(-1); } filesize = im1.rows * im1.cols * 3; det_file << name << ": " << im1.cols << "x" << im1.rows << endl << "========" << endl; avg_file << name << ": " << im1.cols << "x" << im1.rows << endl << "========" << endl; allc_file << name; allg_file << name; spd_file << name; thc_file << name; thg_file << name; //printf("Reading done\n"); //RGB 2 Gray int rounds = 10; printf("RGB2Gray\n"); double cpu_duration = 0, gpu_duration = 0; det_file << "RGB 2 GRAY" << endl; avg_file << "RGB 2 GRAY" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::rgb2Gray(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); rgb2Gray(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //avg_file.close(); //det_file.close(); //continue; //Reverse printf("Reverse\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "REVERSE" << endl; avg_file << "REVERSE" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::reverse(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); reverse(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << 
cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gamma Correction printf("Gamma Correction\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "GAMMA CORRECTION" << endl; avg_file << "GAMMA CORRECTION" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::gammaCorrection(im1, 2.0); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); gammaCorrection(im1, 2.0); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Direct Resize printf("Direct Resize x2\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "DIRECT RESIZE (X2)" << endl; avg_file << "DIRECT RESIZE (X2)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::directResize(im1, im1.rows * 2, im1.cols * 2); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); directResize(im1, im1.rows * 2, im1.cols * 2); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); printf("Direct Resize x0.5\n"); cpu_duration = 0; 
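// Aside (hedged, not a measured result): the thc/thg columns divide filesize =
// rows * cols * 3 bytes by the average time in microseconds, so the "MB/s" header is
// consistent -- bytes per microsecond equals 10^6 bytes per second, i.e. decimal MB/s.
// Worked example with a made-up timing: a 1920x1080 BGR image is 6,220,800 bytes; if an
// operation averaged 2,074 us, its column would read 6,220,800 / 2,074, roughly 3,000,
// i.e. about 3 GB/s. The 2,074 us figure is illustrative only, not from this benchmark.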
gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "DIRECT RESIZE (X0.5)" << endl; avg_file << "DIRECT RESIZE (X0.5)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::directResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); directResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Linear Resize printf("Linear Resize x2\n"); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "LINEAR RESIZE (X2)" << endl; avg_file << "LINEAR RESIZE (X2)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::linearResize(im1, im1.rows * 2, im1.cols * 2); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); linearResize(im1, im1.rows * 2, im1.cols * 2); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); cpu_duration = 0; gpu_duration = 0; //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); printf("Linear Resize x0.5\n"); det_file << "LINEAR RESIZE (X0.5)" << endl; avg_file << "LINEAR RESIZE (X0.5)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::linearResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t2 
= std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); linearResize(im1, im1.rows * 0.5, im1.cols * 0.5); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gaussian Filter rounds = 5; cpu_duration = 0; gpu_duration = 0; printf("Gaussian Filter Sep\n"); Mat img_temp = frgb2Gray(im1); //Mat im1 = imread("D://School//Summer 2016//Research//valve.png"); det_file << "GAUSSIAN FILTER SEP (SIZE 3, SIGMA 1.0)" << endl; avg_file << "GAUSSIAN FILTER SEP (SIZE 3, SIGMA 1.0)" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); //img_proc::gaussianFilter(im1, 1.0); img_proc::fGaussianFilterSep(img_temp, 1.0); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); fGaussianFilterSep(img_temp, 1.0); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Edge Filter rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); printf("Edge Filter\n"); det_file << "EDGE DETECTION" << endl; avg_file << "EDGE DETECTION" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::sobelFilter(im1); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); 
sobelFilter(im1); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //k-Means 8, 200 rounds rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); det_file << "K MEANS 8" << endl; avg_file << "K MEANS 8" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::kMeansFixed(im1, 8); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); kMeansFixed(im1, 8); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //Gaussian Pyramid rounds = 1; cpu_duration = 0; gpu_duration = 0; //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); det_file << "GAUSSIAN PYRAMID 8 0.5x" << endl; avg_file << "GAUSSIAN PYRAMID 8 0.5x" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::gaussianPyramid(im1, 8, 0.5); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); gaussianPyramid(im1, 8, 0.5);; auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: 
" << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); //SIFT rounds = 1; cpu_duration = 0; gpu_duration = 0; printf("SIFT\n"); //Mat im2 = imread("D://School//Summer 2016//Research//valve_gray.png"); int lesser = min(img_temp.rows, img_temp.cols); Mat img_sqtmp = fdirectResize(img_temp,lesser,lesser); det_file << "SIFT" << endl; avg_file << "SIFT" << endl; for (int runs = 1; runs <= rounds; runs++){ double cpu_time = 0, gpu_time = 0; auto t1 = std::chrono::high_resolution_clock::now(); img_proc::mySift(img_sqtmp); auto t2 = std::chrono::high_resolution_clock::now(); cpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t2 - t1).count(); det_file << "CPU Run: " << fixed << cpu_time << micro << "s" << endl; cpu_duration += cpu_time; auto t3 = std::chrono::high_resolution_clock::now(); mySift(img_sqtmp); auto t4 = std::chrono::high_resolution_clock::now(); gpu_time = std::chrono::duration_cast<std::chrono::microseconds> (t4 - t3).count(); det_file << "GPU Run: " << fixed << gpu_time << micro << "s" << endl; gpu_duration += gpu_time; } det_file << "========" << endl; avg_file << "CPU Average: " << fixed << cpu_duration / rounds << micro << "s" << endl; avg_file << "GPU Average: " << fixed << gpu_duration / rounds << micro << "s" << endl; avg_file << "CPU:GPU: " << fixed << cpu_duration / gpu_duration << endl; avg_file << "========" << endl; allc_file << "," << fixed << cpu_duration / rounds; allg_file << "," << fixed << gpu_duration / rounds; spd_file << "," << fixed << cpu_duration / gpu_duration; thc_file << "," << fixed << filesize / (cpu_duration / rounds); thg_file << "," << fixed << filesize / (gpu_duration / rounds); avg_file.close(); det_file.close(); allc_file << endl; allg_file << endl; spd_file << endl; thc_file << endl; thg_file << endl; } allc_file.close(); allg_file.close(); spd_file.close(); thc_file.close(); thg_file.close(); return 0; }
4c3539b614d35306cfe02b14b996dd3cf80c0652.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Image.hpp" #include "Graph_gpu.hpp" #include <iostream> #include <fstream> #include <vector> #include <cmath> #include <stdio.h> #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void display(GraphGPU* graph) { int x = blockIdx.x * 256 + threadIdx.x; printf("%d", graph->m_excessFlow[x]); } //function added to check if the graph was copied successfully on GPU void count_active_cpu(GraphGPU graph) { int count = 0; for (int i = 0; i < graph.m_maxHeight; i++) { if (graph.m_excessFlow[i] > 0 && graph.m_heights[i] < graph.m_maxHeight) count++; } //std::cout << "cpu_count = " << count << "\n"; } //Copy a cpu array onto the device void copy_pointer(int** gpu_ptr, int* cpu_ptr, size_t size) { //allocate array on the device int *host_ptr; hipMalloc((void **) &host_ptr, size); cudaCheckError(); //copy the cpu array on the device hipMemcpy(host_ptr, cpu_ptr, size, hipMemcpyHostToDevice); cudaCheckError(); //copy the gpu array adress on our pointer hipMemcpy(gpu_ptr, &host_ptr, sizeof(int*), hipMemcpyHostToDevice); hipDeviceSynchronize(); cudaCheckError(); } //Copy the CPU graph to the GPU void copy_graph(GraphGPU *gpu_graph, GraphGPU cpu_graph) { //copy the structure hipMemcpy(gpu_graph, &cpu_graph, sizeof(GraphGPU), hipMemcpyHostToDevice); cudaCheckError(); size_t matrix_size = cpu_graph.m_maxHeight * sizeof(int); //copy every matrices used copy_pointer(&(gpu_graph->m_excessFlow), cpu_graph.m_excessFlow, matrix_size); copy_pointer(&(gpu_graph->m_heights), cpu_graph.m_heights, matrix_size); copy_pointer(&(gpu_graph->m_topNeighbourCapacity), cpu_graph.m_topNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_bottomNeighbourCapacity), cpu_graph.m_bottomNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_rightNeighbourCapacity), cpu_graph.m_rightNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_leftNeighbourCapacity), cpu_graph.m_leftNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_sinkCapacity), cpu_graph.m_sinkCapacity, matrix_size); copy_pointer(&(gpu_graph->m_sourceCapacity), cpu_graph.m_sourceCapacity, matrix_size); } //copy the graph heights onto the swap __global__ void graph_to_swap(GraphGPU *graph, int *swap) { int x = blockIdx.x * 256 + threadIdx.x; if (x >= graph->m_maxHeight) return; swap[x] = graph->m_heights[x]; } //copy the swap heights onto the graph heights __global__ void swap_to_graph(GraphGPU *graph, int *swap) { int x = blockIdx.x * 256 + threadIdx.x; if (x >= graph->m_maxHeight) return; graph->m_heights[x] = swap[x]; } int main() { Image image("inputs/12003.jpg"); Image imageHelper("inputs/12003_modified.jpg"); GraphGPU graph(image, imageHelper); count_active_cpu(graph); hipDeviceProp_t device; hipGetDeviceProperties(&device,0); int blockSize = 256; int numBlocks = (graph.m_maxHeight + blockSize - 1) / blockSize; //allocate graph on GPU and copy the one initialize on CPU GraphGPU* g; hipMalloc((void **) &g, sizeof(GraphGPU)); cudaCheckError(); copy_graph(g, graph); //setup CPU counter int c = 0; int* count = &c; //setup GPU counter int* gpu_count; hipMalloc((void **)&gpu_count, sizeof(int)); cudaCheckError(); hipDeviceSynchronize(); hipMemcpy(gpu_count, count, sizeof(int), hipMemcpyHostToDevice); cudaCheckError(); hipDeviceSynchronize(); //count active nodes hipLaunchKernelGGL(( count_active), 
dim3(numBlocks), dim3(blockSize), 0, 0, g, gpu_count); cudaCheckError(); hipDeviceSynchronize(); //copy the counter from GPU to CPU hipMemcpy(count, gpu_count, sizeof(int), hipMemcpyDeviceToHost); //std::cout << "\ncount_gpu = " << *count << "\n"; //initialize swap heights int* swap_heights; hipMalloc((void **)&swap_heights, graph.m_maxHeight * sizeof(int)); cudaCheckError(); //copy the graph heights onto the swap hipLaunchKernelGGL(( graph_to_swap), dim3(numBlocks), dim3(blockSize), 0, 0, g, swap_heights); while(*count > 0) { hipLaunchKernelGGL(( push), dim3(numBlocks), dim3(blockSize), 0, 0, g); hipDeviceSynchronize(); //update swap heights hipLaunchKernelGGL(( relabel), dim3(numBlocks), dim3(blockSize), 0, 0, g, swap_heights); hipDeviceSynchronize(); //copy the updated heights to the graph hipLaunchKernelGGL(( swap_to_graph), dim3(numBlocks), dim3(blockSize), 0, 0, g, swap_heights); hipDeviceSynchronize(); //set gpu counter to 0 hipMemset(gpu_count,0,sizeof(int)); hipLaunchKernelGGL(( count_active), dim3(numBlocks), dim3(blockSize), 0, 0, g, gpu_count); cudaCheckError(); hipDeviceSynchronize(); //copy GPU counter onto CPU counter for the while loop hipMemcpy(count, gpu_count, sizeof(int), hipMemcpyDeviceToHost); std::cout << *count << "\n"; } //std::cout << "GraphGPU cut done\n"; //generate final image just like CPU version int x = graph.m_maxHeight; int *outDevice = swap_heights; int *out = (int*)std::malloc(graph.m_maxHeight * sizeof(int)); memset(out, 0, graph.m_maxHeight * sizeof(int)); hipMemcpy(outDevice, out, graph.m_maxHeight * sizeof(int), hipMemcpyHostToDevice); cudaCheckError(); hipLaunchKernelGGL(( setImage), dim3(numBlocks), dim3(blockSize), 0, 0, g, outDevice); cudaCheckError(); hipDeviceSynchronize(); hipMemcpy(out, outDevice, graph.m_maxHeight * sizeof(int), hipMemcpyDeviceToHost); //std::vector<std::vector<int>> out = std::vector<std::vector<int>>(graph.m_height, std::vector<int>(graph.m_width, 0)); //auto visited = graph.dfs(); std::ofstream ofs ("out.ppm", std::ios::binary); ofs << "P6\n" << graph.m_width << " " << graph.m_height << "\n255\n"; for (int i = 0; i < graph.m_maxHeight; i++) { char r = (char)(255 * out[i]); char g = (char)(255 * out[i]); char b = (char)(255 * out[i]); ofs << r << g << b; } ofs.close(); return 0; }
4c3539b614d35306cfe02b14b996dd3cf80c0652.cu
#include "Image.hpp" #include "Graph_gpu.hpp" #include <iostream> #include <fstream> #include <vector> #include <cmath> #include <stdio.h> #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } __global__ void display(GraphGPU* graph) { int x = blockIdx.x * 256 + threadIdx.x; printf("%d", graph->m_excessFlow[x]); } //function added to check if the graph was copied successfully on GPU void count_active_cpu(GraphGPU graph) { int count = 0; for (int i = 0; i < graph.m_maxHeight; i++) { if (graph.m_excessFlow[i] > 0 && graph.m_heights[i] < graph.m_maxHeight) count++; } //std::cout << "cpu_count = " << count << "\n"; } //Copy a cpu array onto the device void copy_pointer(int** gpu_ptr, int* cpu_ptr, size_t size) { //allocate array on the device int *host_ptr; cudaMalloc((void **) &host_ptr, size); cudaCheckError(); //copy the cpu array on the device cudaMemcpy(host_ptr, cpu_ptr, size, cudaMemcpyHostToDevice); cudaCheckError(); //copy the gpu array adress on our pointer cudaMemcpy(gpu_ptr, &host_ptr, sizeof(int*), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); cudaCheckError(); } //Copy the CPU graph to the GPU void copy_graph(GraphGPU *gpu_graph, GraphGPU cpu_graph) { //copy the structure cudaMemcpy(gpu_graph, &cpu_graph, sizeof(GraphGPU), cudaMemcpyHostToDevice); cudaCheckError(); size_t matrix_size = cpu_graph.m_maxHeight * sizeof(int); //copy every matrices used copy_pointer(&(gpu_graph->m_excessFlow), cpu_graph.m_excessFlow, matrix_size); copy_pointer(&(gpu_graph->m_heights), cpu_graph.m_heights, matrix_size); copy_pointer(&(gpu_graph->m_topNeighbourCapacity), cpu_graph.m_topNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_bottomNeighbourCapacity), cpu_graph.m_bottomNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_rightNeighbourCapacity), cpu_graph.m_rightNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_leftNeighbourCapacity), cpu_graph.m_leftNeighbourCapacity, matrix_size); copy_pointer(&(gpu_graph->m_sinkCapacity), cpu_graph.m_sinkCapacity, matrix_size); copy_pointer(&(gpu_graph->m_sourceCapacity), cpu_graph.m_sourceCapacity, matrix_size); } //copy the graph heights onto the swap __global__ void graph_to_swap(GraphGPU *graph, int *swap) { int x = blockIdx.x * 256 + threadIdx.x; if (x >= graph->m_maxHeight) return; swap[x] = graph->m_heights[x]; } //copy the swap heights onto the graph heights __global__ void swap_to_graph(GraphGPU *graph, int *swap) { int x = blockIdx.x * 256 + threadIdx.x; if (x >= graph->m_maxHeight) return; graph->m_heights[x] = swap[x]; } int main() { Image image("inputs/12003.jpg"); Image imageHelper("inputs/12003_modified.jpg"); GraphGPU graph(image, imageHelper); count_active_cpu(graph); cudaDeviceProp device; cudaGetDeviceProperties(&device,0); int blockSize = 256; int numBlocks = (graph.m_maxHeight + blockSize - 1) / blockSize; //allocate graph on GPU and copy the one initialize on CPU GraphGPU* g; cudaMalloc((void **) &g, sizeof(GraphGPU)); cudaCheckError(); copy_graph(g, graph); //setup CPU counter int c = 0; int* count = &c; //setup GPU counter int* gpu_count; cudaMalloc((void **)&gpu_count, sizeof(int)); cudaCheckError(); cudaDeviceSynchronize(); cudaMemcpy(gpu_count, count, sizeof(int), cudaMemcpyHostToDevice); cudaCheckError(); cudaDeviceSynchronize(); //count active nodes count_active<<<numBlocks, blockSize>>>(g, gpu_count); cudaCheckError(); cudaDeviceSynchronize(); //copy the 
counter from GPU to CPU cudaMemcpy(count, gpu_count, sizeof(int), cudaMemcpyDeviceToHost); //std::cout << "\ncount_gpu = " << *count << "\n"; //initialize swap heights int* swap_heights; cudaMalloc((void **)&swap_heights, graph.m_maxHeight * sizeof(int)); cudaCheckError(); //copy the graph heights onto the swap graph_to_swap<<<numBlocks, blockSize>>>(g, swap_heights); while(*count > 0) { push<<<numBlocks, blockSize>>>(g); cudaDeviceSynchronize(); //update swap heights relabel<<<numBlocks, blockSize>>>(g, swap_heights); cudaDeviceSynchronize(); //copy the updated heights to the graph swap_to_graph<<<numBlocks, blockSize>>>(g, swap_heights); cudaDeviceSynchronize(); //set gpu counter to 0 cudaMemset(gpu_count,0,sizeof(int)); count_active<<<numBlocks, blockSize>>>(g, gpu_count); cudaCheckError(); cudaDeviceSynchronize(); //copy GPU counter onto CPU counter for the while loop cudaMemcpy(count, gpu_count, sizeof(int), cudaMemcpyDeviceToHost); std::cout << *count << "\n"; } //std::cout << "GraphGPU cut done\n"; //generate final image just like CPU version int x = graph.m_maxHeight; int *outDevice = swap_heights; int *out = (int*)std::malloc(graph.m_maxHeight * sizeof(int)); memset(out, 0, graph.m_maxHeight * sizeof(int)); cudaMemcpy(outDevice, out, graph.m_maxHeight * sizeof(int), cudaMemcpyHostToDevice); cudaCheckError(); setImage<<<numBlocks, blockSize>>>(g, outDevice); cudaCheckError(); cudaDeviceSynchronize(); cudaMemcpy(out, outDevice, graph.m_maxHeight * sizeof(int), cudaMemcpyDeviceToHost); //std::vector<std::vector<int>> out = std::vector<std::vector<int>>(graph.m_height, std::vector<int>(graph.m_width, 0)); //auto visited = graph.dfs(); std::ofstream ofs ("out.ppm", std::ios::binary); ofs << "P6\n" << graph.m_width << " " << graph.m_height << "\n255\n"; for (int i = 0; i < graph.m_maxHeight; i++) { char r = (char)(255 * out[i]); char g = (char)(255 * out[i]); char b = (char)(255 * out[i]); ofs << r << g << b; } ofs.close(); return 0; }
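The pair above shows the mechanical CUDA-to-HIP rewrite that hipify produces: runtime calls are renamed one-for-one and triple-chevron launches become hipLaunchKernelGGL. A minimal self-contained sketch of that mapping; the kernel name scale and the buffer sizes are placeholders, not taken from these files.

#include <cuda_runtime.h>

// Placeholder kernel, used only to illustrate the launch rewrite.
__global__ void scale(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= 2.0f;
}

int main() {
    const int n = 1 << 20;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));      // HIP: hipMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));    // HIP: hipMemset(d, 0, n * sizeof(float));
    int blockSize = 256;
    int numBlocks = (n + blockSize - 1) / blockSize;
    scale<<<numBlocks, blockSize>>>(d, n);  // HIP: hipLaunchKernelGGL(scale, dim3(numBlocks), dim3(blockSize), 0, 0, d, n);
    cudaDeviceSynchronize();                // HIP: hipDeviceSynchronize();
    cudaFree(d);                            // HIP: hipFree(d);
    return 0;
}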
af3d018269b68125a6e509e0845fdd79350bc382.hip
// !!! This is a file automatically generated by hipify!!!
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#endif

#include <opencv2/opencv.hpp>
#include <vector>
#include <chrono>
#include <string>

using namespace std;

__global__ void colored_sobel(unsigned char const* in, unsigned char* const out, std::size_t w, std::size_t h)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;

    if (i > 1 && j > 1 && i < w - 1 && j < h - 1)
    {
        for (int c = 0; c < 3; c++)
        {
            auto hh = in[((j - 1) * w + i - 1) * 3 + c] - in[((j - 1) * w + i + 1) * 3 + c]
                    + 2 * in[(j * w + i - 1) * 3 + c] - 2 * in[(j * w + i + 1) * 3 + c]
                    + in[((j + 1) * w + i - 1) * 3 + c] - in[((j + 1) * w + i + 1) * 3 + c];

            auto vv = in[((j - 1) * w + i - 1) * 3 + c] - in[((j + 1) * w + i - 1) * 3 + c]
                    + 2 * in[((j - 1) * w + i) * 3 + c] - 2 * in[((j + 1) * w + i) * 3 + c]
                    + in[((j - 1) * w + i + 1) * 3 + c] - in[((j + 1) * w + i + 1) * 3 + c];

            auto res = hh * hh + vv * vv;
            res = res > 255 * 255 ? 255 * 255 : res;

            out[(j * w + i) * 3 + c] = sqrt((float)res);
        }
    }
}

void colored_sobel(std::string name)
{
    cv::Mat m_in = cv::imread(name, cv::IMREAD_UNCHANGED);

    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    std::vector< unsigned char > g(3 * rows * cols);
    cv::Mat m_out(rows, cols, CV_8UC3, g.data());

    unsigned char* rgb_d;
    unsigned char* out_d;

    auto start = std::chrono::system_clock::now();

    hipEvent_t cudaStart, cudaStop;
    hipEventCreate(&cudaStart);
    hipEventCreate(&cudaStop);
    hipEventRecord(cudaStart);

    hipMalloc(&rgb_d, 3 * rows * cols);
    hipMalloc(&out_d, 3 * rows * cols);

    hipMemcpy(rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice);

    dim3 block(32, 32);
    dim3 grid((cols - 1) / block.x + 1, (rows - 1) / block.y + 1); //(4,4)

    colored_sobel << <grid, block >> > (rgb_d, out_d, cols, rows);

    hipMemcpy(g.data(), out_d, 3 * rows * cols, hipMemcpyDeviceToHost);

    hipEventRecord(cudaStop);
    hipEventSynchronize(cudaStop);

    auto stop = std::chrono::system_clock::now();
    auto duration = stop - start;
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();

    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, cudaStart, cudaStop);
    std::cout << "Temps kernel: " << elapsedTime << std::endl;
    hipEventDestroy(cudaStart);
    hipEventDestroy(cudaStop);

    auto err = hipGetLastError();
    std::cout << "Erreur: " << err << std::endl;

    std::cout << ms << " ms" << std::endl;

    cv::imwrite("csCUDA.jpg", m_out);
    cout << "Le fichier \"csCUDA.jpg\" a bien ete genere. Toutes nos felicitations !" << endl;

    hipFree(rgb_d);
    hipFree(out_d);
}
af3d018269b68125a6e509e0845fdd79350bc382.cu
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#endif

#include <opencv2/opencv.hpp>
#include <vector>
#include <chrono>
#include <string>

using namespace std;

__global__ void colored_sobel(unsigned char const* in, unsigned char* const out, std::size_t w, std::size_t h)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;

    if (i > 1 && j > 1 && i < w - 1 && j < h - 1)
    {
        for (int c = 0; c < 3; c++)
        {
            auto hh = in[((j - 1) * w + i - 1) * 3 + c] - in[((j - 1) * w + i + 1) * 3 + c]
                    + 2 * in[(j * w + i - 1) * 3 + c] - 2 * in[(j * w + i + 1) * 3 + c]
                    + in[((j + 1) * w + i - 1) * 3 + c] - in[((j + 1) * w + i + 1) * 3 + c];

            auto vv = in[((j - 1) * w + i - 1) * 3 + c] - in[((j + 1) * w + i - 1) * 3 + c]
                    + 2 * in[((j - 1) * w + i) * 3 + c] - 2 * in[((j + 1) * w + i) * 3 + c]
                    + in[((j - 1) * w + i + 1) * 3 + c] - in[((j + 1) * w + i + 1) * 3 + c];

            auto res = hh * hh + vv * vv;
            res = res > 255 * 255 ? 255 * 255 : res;

            out[(j * w + i) * 3 + c] = sqrt((float)res);
        }
    }
}

void colored_sobel(std::string name)
{
    cv::Mat m_in = cv::imread(name, cv::IMREAD_UNCHANGED);

    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    std::vector< unsigned char > g(3 * rows * cols);
    cv::Mat m_out(rows, cols, CV_8UC3, g.data());

    unsigned char* rgb_d;
    unsigned char* out_d;

    auto start = std::chrono::system_clock::now();

    cudaEvent_t cudaStart, cudaStop;
    cudaEventCreate(&cudaStart);
    cudaEventCreate(&cudaStop);
    cudaEventRecord(cudaStart);

    cudaMalloc(&rgb_d, 3 * rows * cols);
    cudaMalloc(&out_d, 3 * rows * cols);

    cudaMemcpy(rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice);

    dim3 block(32, 32);
    dim3 grid((cols - 1) / block.x + 1, (rows - 1) / block.y + 1); //(4,4)

    colored_sobel << <grid, block >> > (rgb_d, out_d, cols, rows);

    cudaMemcpy(g.data(), out_d, 3 * rows * cols, cudaMemcpyDeviceToHost);

    cudaEventRecord(cudaStop);
    cudaEventSynchronize(cudaStop);

    auto stop = std::chrono::system_clock::now();
    auto duration = stop - start;
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, cudaStart, cudaStop);
    std::cout << "Temps kernel: " << elapsedTime << std::endl;
    cudaEventDestroy(cudaStart);
    cudaEventDestroy(cudaStop);

    auto err = cudaGetLastError();
    std::cout << "Erreur: " << err << std::endl;

    std::cout << ms << " ms" << std::endl;

    cv::imwrite("csCUDA.jpg", m_out);
    cout << "Le fichier \"csCUDA.jpg\" a bien ete genere. Toutes nos felicitations !" << endl;

    cudaFree(rgb_d);
    cudaFree(out_d);
}
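Both versions above launch colored_sobel with 32x32 blocks and a ceiling-divided grid, and the kernel's own bounds test leaves the outermost rows and columns untouched because the 3x3 Sobel stencil needs a full neighbourhood. A small sketch of that geometry check; ceil_div and the example image size are illustrative, not from the files.

#include <cassert>

// Illustrative check of the launch geometry used above (not from the files):
// ceiling division guarantees grid.x * block.x >= cols and grid.y * block.y >= rows,
// so every pixel gets a thread; the kernel then filters out border pixels itself.
static unsigned int ceil_div(unsigned int a, unsigned int b) { return (a - 1) / b + 1; }

int main() {
    unsigned int rows = 1080, cols = 1920;   // example sizes, not taken from the dataset
    unsigned int bx = 32, by = 32;
    unsigned int gx = ceil_div(cols, bx);
    unsigned int gy = ceil_div(rows, by);
    assert(gx * bx >= cols && gy * by >= rows);
    return 0;
}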
4ec493e04299c2568972450104094c460de3e57a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/* This file is copied from https://github.com/jzbonter/mc-cnn */

extern "C" {
}

#define TB 128
#define DISP_MAX 256

__global__ void copy_fill(float *in, float *out, int size, int in_size2, int in_size3, int out_size2, int out_size3)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size) {
        int out_x = id % out_size3;
        int out_y = id / out_size3;

        int in_x = out_x - (out_size3 - in_size3) / 2;
        int in_y = out_y - (out_size2 - in_size2) / 2;

        int x = min(in_size3 - 1, max(0, in_x));
        int y = min(in_size2 - 1, max(0, in_y));

        out[id] = in[y * in_size3 + x];
    }
}
4ec493e04299c2568972450104094c460de3e57a.cu
#include "includes.h" /* This file is copied from https://github.com/jzbonter/mc-cnn */ extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void copy_fill(float *in, float *out, int size, int in_size2, int in_size3, int out_size2, int out_size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int out_x = id % out_size3; int out_y = id / out_size3; int in_x = out_x - (out_size3 - in_size3) / 2; int in_y = out_y - (out_size2 - in_size2) / 2; int x = min(in_size3 - 1, max(0, in_x)); int y = min(in_size2 - 1, max(0, in_y)); out[id] = in[y * in_size3 + x]; } }
aa9cc1cce0c8a0fd397f6c8cd6b5d7fcb264f308.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <vector> #include "node.h" #include "path.h" /* includes, project */ #include <cutil_inline.h> /* includes, kernels */ #include <cppIntegration_kernel.cu> #define _DEBUG_CUDA (0) #define cpyMemory(x, y, Sz) do{ \ memcpy(((void *)(x)), \ ((const void *)(y)), \ ((size_t)(Sz)));}while(0) static volatile bool rdy2malloc = true; /* Feature HOST-GPU Functions */ extern void gpuAllocFeatures (cudaFeatures &Features) { #ifdef _DEBUG_FLOAT_GPU /* check if input Alpha vector has any bad float values */ size_t alphatmpsz = Features.cpu.halphaSz/sizeof(float); for (size_t i=0; i < alphatmpsz; i++){ if (Features.cpu.halpha[i] != Features.cpu.halpha[i]) { std::cout << "gpu_gradient, bad alpha value at: " << i << std::endl; Features.cpu.halpha[i] = 0.0; } } #endif cutilSafeCall(hipMalloc(&(Features.gpu.dAlpha), Features.cpu.halphaSz)); cutilSafeCall(hipMalloc(&(Features.gpu.dfeatures),Features.cpu.hcacheSz)); cutilSafeCall(hipMalloc(&(Features.gpu.dbaseFeat),Features.cpu.hfeatureSz)); cutilSafeCall(hipMalloc(&(Features.gpu.dexpected),Features.cpu.halphaSz)); } extern void gpuFreeFeatures (cudaFeatures & Features) { delete [] Features.cpu.halpha; delete [] Features.cpu.hexpected; free(Features.cpu.hfeatures); hipFree(Features.gpu.dAlpha); hipFree(Features.gpu.dexpected); hipFree(Features.gpu.dfeatures); hipFree(Features.gpu.dbaseFeat); } extern void gpuCpyFeatures2GPU (cudaFeatures & Features) { cutilSafeCall(hipMemcpy(Features.gpu.dAlpha,Features.cpu.halpha, Features.cpu.halphaSz, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Features.gpu.dfeatures,Features.cpu.hfeatures, Features.cpu.hcacheSz, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(Features.gpu.dbaseFeat,Features.cpu.hbaseFeat, Features.cpu.hfeatureSz, hipMemcpyHostToDevice)); } extern void gpuCpyAlpha2GPU (cudaFeatures & Features) { cutilSafeCall(hipMemcpy(Features.gpu.dAlpha,Features.cpu.halpha, Features.cpu.halphaSz, hipMemcpyHostToDevice)); } extern void gpuClearExpected (cudaFeatures & Features) { hipMemset(Features.gpu.dexpected, 0.0, Features.cpu.halphaSz); } extern void gpuCpyExpected2CPU (cudaFeatures * Features) { cutilSafeCall(hipMemcpy(Features->cpu.hexpected, Features->gpu.dexpected, Features->cpu.halphaSz, hipMemcpyDeviceToHost)); } extern void gpuCpyExpected2GPU(cudaFeatures * Features) { cutilSafeCall(hipMemcpy(Features->gpu.dexpected, Features->cpu.hexpected, Features->cpu.halphaSz, hipMemcpyHostToDevice)); } /* Tagger HOST-GPU functions */ extern void gpuAllocTagger (cudaTagger & Tagger) { size_t memSz = (sizeof(float) * (Tagger.id.hx) * (Tagger.id.hy)); size_t memPathSz = (sizeof(float) * (Tagger.id.hx - 1) * (Tagger.id.hy) * (Tagger.id.hy)); /* path cost: The indexing of the paths in this structure is as * follows: * idx1D = i*(y^2)+j*(y)+k, where * i = feature index * j = output index * k = path to next output being k * x is the number of features * y is the number of possible outputs */ while(!rdy2malloc); cutilSafeCall(hipMalloc(&(Tagger.gpu.dPathCost),memPathSz)); hipMemset(Tagger.gpu.dPathCost, 0, memPathSz); /* The following variables occur once per node. 
The mapping to * this structure is as follows: * idx1D = i*(x) + j, where * i = feature index * j = output index */ cutilSafeCall(hipMalloc(&(Tagger.gpu.dNodeCost),memSz)); hipMemset(Tagger.gpu.dNodeCost, 0.0, memSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.dBestCost),memSz)); hipMemset(Tagger.gpu.dBestCost, 0.0, memSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.dtrcbck),memSz)); hipMemset(Tagger.gpu.dtrcbck, 0.0, memSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.dAlpha),memSz)); hipMemset(Tagger.gpu.dAlpha, 0.0, memSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.dBeta),memSz)); hipMemset(Tagger.gpu.dBeta, 0.0, memSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.dZ),sizeof(float))); hipMemset(Tagger.gpu.dZ, 0.0, sizeof(float)); cutilSafeCall(hipMalloc(&(Tagger.gpu.derr),sizeof(unsigned int))); hipMemset(Tagger.gpu.derr, 0, sizeof(unsigned int)); cutilSafeCall(hipMalloc(&(Tagger.gpu.danswers), Tagger.cpu.answersSz)); cutilSafeCall(hipMalloc(&(Tagger.gpu.dresults), Tagger.cpu.answersSz)); hipMemset(Tagger.gpu.dresults, 0, Tagger.cpu.answersSz); cutilSafeCall(hipMalloc(&(Tagger.gpu.ds),sizeof(float))); hipMemset(Tagger.gpu.ds, 0.0, sizeof(float)); cutilSafeCall(hipMalloc(&(Tagger.gpu.dcost),sizeof(float))); hipMemset(Tagger.gpu.dcost, 0.0, sizeof(float)); cutilSafeCall(hipMalloc(&(Tagger.gpu.dCpath),memPathSz)); cutilSafeCall(hipMalloc(&(Tagger.gpu.dCnode),memSz)); cutilCheckMsg("Alloc Tagger failed"); /* Allocate host memory to perform memory copy in contiguous space */ #ifdef COPY_ALL_FROM_GPU Tagger.cpu.hPathCost = (float *) malloc(memPathSz); assert(Tagger.cpu.hPathCost != NULL); Tagger.cpu.hNodeCost = (float *) malloc(memSz); assert(Tagger.cpu.hNodeCost != NULL); Tagger.cpu.hAlpha = (float *) malloc(memSz); assert(Tagger.cpu.hAlpha != NULL); Tagger.cpu.hBeta = (float *) malloc(memSz); assert(Tagger.cpu.hBeta != NULL); #endif rdy2malloc = false; } extern void gpuFreeTagger (cudaTagger & Tagger) { while(rdy2malloc); #ifdef COPY_ALL_FROM_GPU free(Tagger.cpu.hPathCost); free(Tagger.cpu.hNodeCost); free(Tagger.cpu.hAlpha); free(Tagger.cpu.hBeta); #endif hipFree(Tagger.gpu.danswers); hipFree(Tagger.gpu.dresults); hipFree(Tagger.gpu.derr); hipFree(Tagger.gpu.dtrcbck); hipFree(Tagger.gpu.dBestCost); hipFree(Tagger.gpu.dNodeCost); hipFree(Tagger.gpu.dPathCost); hipFree(Tagger.gpu.dAlpha); hipFree(Tagger.gpu.dBeta); hipFree(Tagger.gpu.dZ); hipFree(Tagger.gpu.ds); hipFree(Tagger.gpu.dcost); hipFree(Tagger.gpu.dCnode); hipFree(Tagger.gpu.dCpath); hipDeviceSynchronize(); rdy2malloc = true; } /* copy results from GPU to Host */ void gpuCpyTagger2CPU(cudaTagger * Tagger) { #ifdef COPY_ALL_FROM_GPU size_t memSz = (sizeof(float) * (Tagger->id.hx) * (Tagger->id.hy)); size_t memPathSz = (sizeof(float) * (Tagger->id.hx - 1) * (Tagger->id.hy) * (Tagger->id.hy)); cutilSafeCall(hipMemcpy(Tagger->cpu.hPathCost,Tagger->gpu.dPathCost, memPathSz,hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(Tagger->cpu.hNodeCost,Tagger->gpu.dNodeCost, memSz,hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(Tagger->cpu.hAlpha,Tagger->gpu.dAlpha, memSz,hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(Tagger->cpu.hBeta,Tagger->gpu.dBeta, memSz,hipMemcpyDeviceToHost)); #endif cutilSafeCall(hipMemcpy(&Tagger->cpu.hZ,Tagger->gpu.dZ, sizeof(float),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(&Tagger->cpu.hs,Tagger->gpu.ds, sizeof(float),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(&Tagger->cpu.herr,Tagger->gpu.derr, sizeof(unsigned int),hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(&Tagger->cpu.hcost,Tagger->gpu.dcost, 
sizeof(float),hipMemcpyDeviceToHost)); } void gpuCpyAnswers2GPU(cudaTagger *Tagger){ cutilSafeCall(hipMemcpy(Tagger->gpu.danswers,Tagger->cpu.answers,Tagger->cpu.answersSz, hipMemcpyHostToDevice)); } #if _DEBUG_CUDA static void debug_gradient (cudaFeatures & Features, cudaTagger & Tagger) { if ((Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2) >= Features.cpu.hcacheSz/sizeof(int)){ std::cout << "ERROR - check feature cache!!!!" << std::endl; std::cout << "(CacheSz,featureSz, alphaSz): " << Features.cpu.hcacheSz << "," << Features.cpu.hfeatureSz << "," << Features.cpu.halphaSz << std::endl; std::cout << "hfeature_id_, x: " << Tagger.id.hfeature_id_ << " , " << Tagger.id.hx << std::endl; } if (Tagger.id.hfeature_id_ == 3662){ for (size_t i = 0; i<Tagger.id.hx; i++){ std::cout <<"node - fid,xidx: " << (Tagger.id.hfeature_id_ + i) << " , " << i << " -- "; std::cout << "fo: " << Features.cpu.hfeatures[Tagger.id.hfeature_id_ + i] << " -- "; std::cout << "base: " << Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + i]] << std::endl; } std::cout << std::endl; for (size_t i = 1; i<Tagger.id.hx; i++) { int pathIdx = (Tagger.id.hfeature_id_ + Tagger.id.hx + (i-1)); int fo = Features.cpu.hfeatures[pathIdx]; int base =Features.cpu.hbaseFeat[fo]; std::cout <<"path - fid,xidx: " << pathIdx << " , " << i << " -- "; std::cout << "fo: " << fo << " -- "; std::cout << "base: " << base << " -- "; while (base != -1){ base = Features.cpu.hbaseFeat[++fo]; std::cout << base << " , "; } std::cout << "last yth alpha: " << Features.cpu.halpha[base + (Tagger.id.hy - 1)*Tagger.id.hy + (Tagger.id.hy - 1) ] << std::endl; } } std::cout << std::endl; std::cout << "gpu_gradient called: ------------------------" << std::endl; std::cout << "(CacheSz,featureSz, alphaSz): " << Features.cpu.hcacheSz << "," << Features.cpu.hfeatureSz << "," << Features.cpu.halphaSz << std::endl; std::cout << "PATH feature Idx: " << Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2] << std::endl; int * idx = &Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2]]; std::cout << "PATH *f(0): " << *idx << std::endl; std::cout << "Alpha for this idx: " << Features.cpu.halpha[*idx] << std::endl; std::cout << "Last Alpha for this idx: " << Features.cpu.halpha[*idx + (Tagger.id.hy-1)*Tagger.id.hy + Tagger.id.hy] << std::endl; /* calculate C in debug mode */ float c = 0.0; for (size_t i = 0; i < Tagger.id.hy ; i++) { idx = &Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2]]; while (*idx != -1) { c += Features.cpu.halpha[*idx + i*Tagger.id.hy + (Tagger.id.hy-1)]; /* calculate cost for y-1 */ idx++; } std::cout << "cost for xmax, ymax, path: " << i << " c: " << c << std::endl; } std::cout << "(x,y,fid) " << Tagger.id.hx<<","<<Tagger.id.hy<<","<<Tagger.id.hfeature_id_ << std::endl; } #endif extern void gpu_gradient(cudaFeatures & Features, cudaTagger & Tagger) { /* CUDA memory allocation and initialization. This is equivalent * to the buildlattice function. 
*/ #if _DEBUG_CUDA debug_gradient(Features,Tagger); cudaPrintfInit(); #endif /* setup execution parameters and build lattice*/ dim3 grid(0, 0, 0); dim3 threads(0, 0, 0); calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); hipLaunchKernelGGL(( buildlatticekernel) , dim3(grid.x), dim3(threads.x) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel buildlatticekernel failed"); cutilSafeThreadSync(); #ifdef GPU_SEQ_PATH_COST hipLaunchKernelGGL(( pathCost) , dim3(1), dim3(1) , 0, 0, Features.gpu.dAlpha, (Features.cpu.halphaSz/sizeof(float)), Features.gpu.dfeatures, Features.gpu.dbaseFeat, Tagger.gpu.dPathCost, Tagger.gpu.dNodeCost, Tagger.id.hx, Tagger.id.hy, Tagger.id.hfeature_id_); cutilCheckMsg("pathCost failed"); #endif /* Perform Alpha Beta Sum */ grid.x = grid.y = grid.z = 1; threads.x = Tagger.id.hy; threads.y = threads.z = 1; hipLaunchKernelGGL(( forwardBackwardKernel) , dim3(grid.x), dim3(threads.x) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel forwardBackwardKernel failed"); cutilSafeThreadSync(); #ifdef PARALELL_EXP calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); hipLaunchKernelGGL(( calcCKernel) , dim3(grid.x), dim3(threads.x) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcCKernel failed"); cutilSafeThreadSync(); #endif #ifdef GPU_PARALELL_EXPECTATION calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); cudaPrintfInit(); hipLaunchKernelGGL(( calcExpectationKernel) , dim3(grid.x), dim3(threads.x) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); cutilCheckMsg("Kernel calcExpectationKernel failed"); cutilSafeThreadSync(); #else /* Expectaction executed in a sequential manner to avoid write on write problems with the expectation vector. */ hipLaunchKernelGGL(( calcExpectationKernelSingle) , dim3(1), dim3(1) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcExpectationKernelSingle failed"); cutilSafeThreadSync(); #endif hipLaunchKernelGGL(( calcSSingle) , dim3(1), dim3(1) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcSSingle failed"); cutilSafeThreadSync(); hipLaunchKernelGGL(( cu_viterbi) , dim3(1), dim3(Tagger.id.hy) , 0, 0, Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel cu_viterbi failed"); cutilSafeThreadSync(); #if _DEBUG_CUDA cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif }
aa9cc1cce0c8a0fd397f6c8cd6b5d7fcb264f308.cu
#include <algorithm> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <vector> #include "node.h" #include "path.h" /* includes, project */ #include <cutil_inline.h> /* includes, kernels */ #include <cppIntegration_kernel.cu> #define _DEBUG_CUDA (0) #define cpyMemory(x, y, Sz) do{ \ memcpy(((void *)(x)), \ ((const void *)(y)), \ ((size_t)(Sz)));}while(0) static volatile bool rdy2malloc = true; /* Feature HOST-GPU Functions */ extern void gpuAllocFeatures (cudaFeatures &Features) { #ifdef _DEBUG_FLOAT_GPU /* check if input Alpha vector has any bad float values */ size_t alphatmpsz = Features.cpu.halphaSz/sizeof(float); for (size_t i=0; i < alphatmpsz; i++){ if (Features.cpu.halpha[i] != Features.cpu.halpha[i]) { std::cout << "gpu_gradient, bad alpha value at: " << i << std::endl; Features.cpu.halpha[i] = 0.0; } } #endif cutilSafeCall(cudaMalloc(&(Features.gpu.dAlpha), Features.cpu.halphaSz)); cutilSafeCall(cudaMalloc(&(Features.gpu.dfeatures),Features.cpu.hcacheSz)); cutilSafeCall(cudaMalloc(&(Features.gpu.dbaseFeat),Features.cpu.hfeatureSz)); cutilSafeCall(cudaMalloc(&(Features.gpu.dexpected),Features.cpu.halphaSz)); } extern void gpuFreeFeatures (cudaFeatures & Features) { delete [] Features.cpu.halpha; delete [] Features.cpu.hexpected; free(Features.cpu.hfeatures); cudaFree(Features.gpu.dAlpha); cudaFree(Features.gpu.dexpected); cudaFree(Features.gpu.dfeatures); cudaFree(Features.gpu.dbaseFeat); } extern void gpuCpyFeatures2GPU (cudaFeatures & Features) { cutilSafeCall(cudaMemcpy(Features.gpu.dAlpha,Features.cpu.halpha, Features.cpu.halphaSz, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Features.gpu.dfeatures,Features.cpu.hfeatures, Features.cpu.hcacheSz, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(Features.gpu.dbaseFeat,Features.cpu.hbaseFeat, Features.cpu.hfeatureSz, cudaMemcpyHostToDevice)); } extern void gpuCpyAlpha2GPU (cudaFeatures & Features) { cutilSafeCall(cudaMemcpy(Features.gpu.dAlpha,Features.cpu.halpha, Features.cpu.halphaSz, cudaMemcpyHostToDevice)); } extern void gpuClearExpected (cudaFeatures & Features) { cudaMemset(Features.gpu.dexpected, 0.0, Features.cpu.halphaSz); } extern void gpuCpyExpected2CPU (cudaFeatures * Features) { cutilSafeCall(cudaMemcpy(Features->cpu.hexpected, Features->gpu.dexpected, Features->cpu.halphaSz, cudaMemcpyDeviceToHost)); } extern void gpuCpyExpected2GPU(cudaFeatures * Features) { cutilSafeCall(cudaMemcpy(Features->gpu.dexpected, Features->cpu.hexpected, Features->cpu.halphaSz, cudaMemcpyHostToDevice)); } /* Tagger HOST-GPU functions */ extern void gpuAllocTagger (cudaTagger & Tagger) { size_t memSz = (sizeof(float) * (Tagger.id.hx) * (Tagger.id.hy)); size_t memPathSz = (sizeof(float) * (Tagger.id.hx - 1) * (Tagger.id.hy) * (Tagger.id.hy)); /* path cost: The indexing of the paths in this structure is as * follows: * idx1D = i*(y^2)+j*(y)+k, where * i = feature index * j = output index * k = path to next output being k * x is the number of features * y is the number of possible outputs */ while(!rdy2malloc); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dPathCost),memPathSz)); cudaMemset(Tagger.gpu.dPathCost, 0, memPathSz); /* The following variables occur once per node. 
The mapping to * this structure is as follows: * idx1D = i*(x) + j, where * i = feature index * j = output index */ cutilSafeCall(cudaMalloc(&(Tagger.gpu.dNodeCost),memSz)); cudaMemset(Tagger.gpu.dNodeCost, 0.0, memSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dBestCost),memSz)); cudaMemset(Tagger.gpu.dBestCost, 0.0, memSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dtrcbck),memSz)); cudaMemset(Tagger.gpu.dtrcbck, 0.0, memSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dAlpha),memSz)); cudaMemset(Tagger.gpu.dAlpha, 0.0, memSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dBeta),memSz)); cudaMemset(Tagger.gpu.dBeta, 0.0, memSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dZ),sizeof(float))); cudaMemset(Tagger.gpu.dZ, 0.0, sizeof(float)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.derr),sizeof(unsigned int))); cudaMemset(Tagger.gpu.derr, 0, sizeof(unsigned int)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.danswers), Tagger.cpu.answersSz)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dresults), Tagger.cpu.answersSz)); cudaMemset(Tagger.gpu.dresults, 0, Tagger.cpu.answersSz); cutilSafeCall(cudaMalloc(&(Tagger.gpu.ds),sizeof(float))); cudaMemset(Tagger.gpu.ds, 0.0, sizeof(float)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dcost),sizeof(float))); cudaMemset(Tagger.gpu.dcost, 0.0, sizeof(float)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dCpath),memPathSz)); cutilSafeCall(cudaMalloc(&(Tagger.gpu.dCnode),memSz)); cutilCheckMsg("Alloc Tagger failed"); /* Allocate host memory to perform memory copy in contiguous space */ #ifdef COPY_ALL_FROM_GPU Tagger.cpu.hPathCost = (float *) malloc(memPathSz); assert(Tagger.cpu.hPathCost != NULL); Tagger.cpu.hNodeCost = (float *) malloc(memSz); assert(Tagger.cpu.hNodeCost != NULL); Tagger.cpu.hAlpha = (float *) malloc(memSz); assert(Tagger.cpu.hAlpha != NULL); Tagger.cpu.hBeta = (float *) malloc(memSz); assert(Tagger.cpu.hBeta != NULL); #endif rdy2malloc = false; } extern void gpuFreeTagger (cudaTagger & Tagger) { while(rdy2malloc); #ifdef COPY_ALL_FROM_GPU free(Tagger.cpu.hPathCost); free(Tagger.cpu.hNodeCost); free(Tagger.cpu.hAlpha); free(Tagger.cpu.hBeta); #endif cudaFree(Tagger.gpu.danswers); cudaFree(Tagger.gpu.dresults); cudaFree(Tagger.gpu.derr); cudaFree(Tagger.gpu.dtrcbck); cudaFree(Tagger.gpu.dBestCost); cudaFree(Tagger.gpu.dNodeCost); cudaFree(Tagger.gpu.dPathCost); cudaFree(Tagger.gpu.dAlpha); cudaFree(Tagger.gpu.dBeta); cudaFree(Tagger.gpu.dZ); cudaFree(Tagger.gpu.ds); cudaFree(Tagger.gpu.dcost); cudaFree(Tagger.gpu.dCnode); cudaFree(Tagger.gpu.dCpath); cudaThreadSynchronize(); rdy2malloc = true; } /* copy results from GPU to Host */ void gpuCpyTagger2CPU(cudaTagger * Tagger) { #ifdef COPY_ALL_FROM_GPU size_t memSz = (sizeof(float) * (Tagger->id.hx) * (Tagger->id.hy)); size_t memPathSz = (sizeof(float) * (Tagger->id.hx - 1) * (Tagger->id.hy) * (Tagger->id.hy)); cutilSafeCall(cudaMemcpy(Tagger->cpu.hPathCost,Tagger->gpu.dPathCost, memPathSz,cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(Tagger->cpu.hNodeCost,Tagger->gpu.dNodeCost, memSz,cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(Tagger->cpu.hAlpha,Tagger->gpu.dAlpha, memSz,cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(Tagger->cpu.hBeta,Tagger->gpu.dBeta, memSz,cudaMemcpyDeviceToHost)); #endif cutilSafeCall(cudaMemcpy(&Tagger->cpu.hZ,Tagger->gpu.dZ, sizeof(float),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(&Tagger->cpu.hs,Tagger->gpu.ds, sizeof(float),cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(&Tagger->cpu.herr,Tagger->gpu.derr, sizeof(unsigned int),cudaMemcpyDeviceToHost)); 
cutilSafeCall(cudaMemcpy(&Tagger->cpu.hcost,Tagger->gpu.dcost, sizeof(float),cudaMemcpyDeviceToHost)); } void gpuCpyAnswers2GPU(cudaTagger *Tagger){ cutilSafeCall(cudaMemcpy(Tagger->gpu.danswers,Tagger->cpu.answers,Tagger->cpu.answersSz, cudaMemcpyHostToDevice)); } #if _DEBUG_CUDA static void debug_gradient (cudaFeatures & Features, cudaTagger & Tagger) { if ((Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2) >= Features.cpu.hcacheSz/sizeof(int)){ std::cout << "ERROR - check feature cache!!!!" << std::endl; std::cout << "(CacheSz,featureSz, alphaSz): " << Features.cpu.hcacheSz << "," << Features.cpu.hfeatureSz << "," << Features.cpu.halphaSz << std::endl; std::cout << "hfeature_id_, x: " << Tagger.id.hfeature_id_ << " , " << Tagger.id.hx << std::endl; } if (Tagger.id.hfeature_id_ == 3662){ for (size_t i = 0; i<Tagger.id.hx; i++){ std::cout <<"node - fid,xidx: " << (Tagger.id.hfeature_id_ + i) << " , " << i << " -- "; std::cout << "fo: " << Features.cpu.hfeatures[Tagger.id.hfeature_id_ + i] << " -- "; std::cout << "base: " << Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + i]] << std::endl; } std::cout << std::endl; for (size_t i = 1; i<Tagger.id.hx; i++) { int pathIdx = (Tagger.id.hfeature_id_ + Tagger.id.hx + (i-1)); int fo = Features.cpu.hfeatures[pathIdx]; int base =Features.cpu.hbaseFeat[fo]; std::cout <<"path - fid,xidx: " << pathIdx << " , " << i << " -- "; std::cout << "fo: " << fo << " -- "; std::cout << "base: " << base << " -- "; while (base != -1){ base = Features.cpu.hbaseFeat[++fo]; std::cout << base << " , "; } std::cout << "last yth alpha: " << Features.cpu.halpha[base + (Tagger.id.hy - 1)*Tagger.id.hy + (Tagger.id.hy - 1) ] << std::endl; } } std::cout << std::endl; std::cout << "gpu_gradient called: ------------------------" << std::endl; std::cout << "(CacheSz,featureSz, alphaSz): " << Features.cpu.hcacheSz << "," << Features.cpu.hfeatureSz << "," << Features.cpu.halphaSz << std::endl; std::cout << "PATH feature Idx: " << Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2] << std::endl; int * idx = &Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2]]; std::cout << "PATH *f(0): " << *idx << std::endl; std::cout << "Alpha for this idx: " << Features.cpu.halpha[*idx] << std::endl; std::cout << "Last Alpha for this idx: " << Features.cpu.halpha[*idx + (Tagger.id.hy-1)*Tagger.id.hy + Tagger.id.hy] << std::endl; /* calculate C in debug mode */ float c = 0.0; for (size_t i = 0; i < Tagger.id.hy ; i++) { idx = &Features.cpu.hbaseFeat[Features.cpu.hfeatures[Tagger.id.hfeature_id_ + 2*Tagger.id.hx - 2]]; while (*idx != -1) { c += Features.cpu.halpha[*idx + i*Tagger.id.hy + (Tagger.id.hy-1)]; /* calculate cost for y-1 */ idx++; } std::cout << "cost for xmax, ymax, path: " << i << " c: " << c << std::endl; } std::cout << "(x,y,fid) " << Tagger.id.hx<<","<<Tagger.id.hy<<","<<Tagger.id.hfeature_id_ << std::endl; } #endif extern void gpu_gradient(cudaFeatures & Features, cudaTagger & Tagger) { /* CUDA memory allocation and initialization. This is equivalent * to the buildlattice function. 
*/ #if _DEBUG_CUDA debug_gradient(Features,Tagger); cudaPrintfInit(); #endif /* setup execution parameters and build lattice*/ dim3 grid(0, 0, 0); dim3 threads(0, 0, 0); calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); buildlatticekernel <<< grid.x, threads.x >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel buildlatticekernel failed"); cutilSafeThreadSync(); #ifdef GPU_SEQ_PATH_COST pathCost <<< 1, 1 >>> (Features.gpu.dAlpha, (Features.cpu.halphaSz/sizeof(float)), Features.gpu.dfeatures, Features.gpu.dbaseFeat, Tagger.gpu.dPathCost, Tagger.gpu.dNodeCost, Tagger.id.hx, Tagger.id.hy, Tagger.id.hfeature_id_); cutilCheckMsg("pathCost failed"); #endif /* Perform Alpha Beta Sum */ grid.x = grid.y = grid.z = 1; threads.x = Tagger.id.hy; threads.y = threads.z = 1; forwardBackwardKernel <<< grid.x, threads.x >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel forwardBackwardKernel failed"); cutilSafeThreadSync(); #ifdef PARALELL_EXP calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); calcCKernel <<< grid.x, threads.x >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcCKernel failed"); cutilSafeThreadSync(); #endif #ifdef GPU_PARALELL_EXPECTATION calcBLKThreads((Tagger.id.hx*Tagger.id.hy),grid,threads); cudaPrintfInit(); calcExpectationKernel <<< grid.x, threads.x >>> (Tagger.gpu,Tagger.id,Features.gpu); cutilCheckMsg("Kernel calcExpectationKernel failed"); cutilSafeThreadSync(); #else /* Expectaction executed in a sequential manner to avoid write on write problems with the expectation vector. */ calcExpectationKernelSingle <<< 1, 1 >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcExpectationKernelSingle failed"); cutilSafeThreadSync(); #endif calcSSingle <<< 1, 1 >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel calcSSingle failed"); cutilSafeThreadSync(); cu_viterbi <<< 1, Tagger.id.hy >>> (Tagger.gpu,Tagger.id,Features.gpu); /* check if kernel execution generated an error */ cutilCheckMsg("Kernel cu_viterbi failed"); cutilSafeThreadSync(); #if _DEBUG_CUDA cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif }
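The allocation comments in gpuAllocTagger above define the flattened path-cost layout as idx1D = i*(y^2) + j*(y) + k, with (x - 1)*y*y entries in total. A one-function sketch of that index computation; path_idx is a hypothetical name and is not part of the source.

#include <cstddef>

// Illustrative helper for the path-cost layout described in gpuAllocTagger:
// i is the position along the sequence (0 .. x-2), j the current label,
// k the next label, y the number of possible outputs.
inline std::size_t path_idx(std::size_t i, std::size_t j, std::size_t k, std::size_t y) {
    return i * y * y + j * y + k;
}
// Total entries: (x - 1) * y * y, matching memPathSz / sizeof(float) above.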
159c8924bc8eb93780e04ce6a767d113f5d55571.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "cudaProcessFrame.h" #define MUL(x,y) (x*y) __device__ void YUV2RGB(uint32 *yuvi, float *rgb) { float luma, chromaCb, chromaCr; luma = (float)yuvi[0]; chromaCb = (float)((int32)yuvi[1] - 512.0f); chromaCr = (float)((int32)yuvi[2] - 512.0f); rgb[0] = MUL(luma, 1.1644) + MUL(chromaCb, 0) + MUL(chromaCr, 1.5960); rgb[1] = MUL(luma, 1.1644) + MUL(chromaCb, -0.3918) + MUL(chromaCr, -0.8130); rgb[2] = MUL(luma, 1.1644) + MUL(chromaCb, 2.0172) + MUL(chromaCr, 0); } __device__ void RGB2YUV(uint32 *rgb, float *yuv) { float r = (float)rgb[0]; float g = (float)rgb[1]; float b = (float)rgb[2]; yuv[0] = MUL(r, 0.2568) + MUL(g, 0.5041) + MUL(b, 0.0979); yuv[1]= MUL(r, -0.1482) + MUL(g, -0.2910) + MUL(b, 0.4392) + 512; yuv[2] = MUL(r, 0.4392) + MUL(g, -0.3678) + MUL(b, -0.0714) + 512; } __device__ uint32 RGBAPACK_10bit(uint32* irgb) { float rgb[3]; uint32 ARGBpixel = 0; rgb[0] = min(max((float)irgb[0], 0.0f), 1023.f); rgb[1] = min(max((float)irgb[1], 0.0f), 1023.f); rgb[2] = min(max((float)irgb[2], 0.0f), 1023.f); ARGBpixel = (((uint32)rgb[2] >> 2) | (((uint32)rgb[1] >> 2) << 8) | (((uint32)rgb[0] >> 2) << 16) | ((uint32)0xff<< 24)); return ARGBpixel; } __device__ uint32 RGBAPACK_10bit(float* rgb) { uint32 ARGBpixel = 0; rgb[0] = min(max(rgb[0], 0.0f), 1023.f); rgb[1] = min(max(rgb[1], 0.0f), 1023.f); rgb[2] = min(max(rgb[2], 0.0f), 1023.f); ARGBpixel = (((uint32)rgb[2] >> 2) | (((uint32)rgb[1] >> 2) << 8) | (((uint32)rgb[0] >> 2) << 16) | ((uint32)0xff<< 24)); return ARGBpixel; } __device__ void RGBAUNPACK_10bit(uint32 pixel, uint32* rgb) { rgb[2] = (pixel & 0xFF) << 2; rgb[1] = ((pixel>>8) & 0xFF) << 2; rgb[0] = ((pixel>>16) & 0xFF) << 2; } extern "C" __global__ void NV12ToARGBdrvapi(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { // process 2 pixels per thread int32 x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x+1 >= width || y >= height) return; uint32 processingPitch = nSourcePitch; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; // Read 2 Y components at a time uint32 yuvi[6]; yuvi[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuvi[3] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline { uint32 chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; uint32 chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuvi[1] = yuvi[4] = chromaCb << 2; yuvi[2] = yuvi[5] = chromaCr << 2; } else { yuvi[1] = yuvi[4] = (uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << 2; yuvi[2] = yuvi[5] = 
(uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << 2; } // YUV to RGB Transformation conversion float rgb[6]; YUV2RGB(&yuvi[0], &rgb[0]); YUV2RGB(&yuvi[3], &rgb[3]); // save to dest dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(&rgb[0]); dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(&rgb[3]); } extern "C" __global__ void ARGBToNv12drvapi(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x+1 >= width || y >= height) return; uint32 processingPitch = nSourcePitch>>2; uint8 *dstImageU8 = (uint8 *)dstImage; uint32 rgb[6]; float yuv[6]; // Clamp the results to RGBA RGBAUNPACK_10bit(srcImage[y * processingPitch + x ], &rgb[0]); RGBAUNPACK_10bit(srcImage[y * processingPitch + x + 1 ], &rgb[3]); // YUV to RGB Transformation conversion RGB2YUV(&rgb[0], &yuv[0]); RGB2YUV(&rgb[3], &yuv[3]); yuv[0] = min(max(yuv[0]/4, 0.0f), 255.f); yuv[1] = min(max((yuv[1])/4, 0.0f), 255.f); yuv[2] = min(max((yuv[2])/4, 0.0f), 255.f); yuv[3] = min(max(yuv[3]/4, 0.0f), 255.f); yuv[4] = min(max((yuv[4])/4, 0.0f), 255.f); yuv[5] = min(max((yuv[5])/4, 0.0f), 255.f); uint32 dstImagePitch = nDestPitch; dstImageU8[y * dstImagePitch + x] = ((uint32)yuv[0]); dstImageU8[y * dstImagePitch + x + 1] = ((uint32)yuv[3]); if (y & 1){ } else { int32 y_chroma = y >> 1; uint32 chromaOffset = dstImagePitch* height; dstImageU8[chromaOffset + y_chroma * dstImagePitch + x] = (uint8)((uint32)yuv[1]); dstImageU8[chromaOffset + y_chroma * dstImagePitch + x+1] = (uint8)((uint32)yuv[2]); } } extern "C" __global__ void ARGBpostprocess(uint32 *srcImage, size_t pitch, uint32 width, uint32 height) { int32 x = blockIdx.x * blockDim.x + threadIdx.x; int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint32 processingPitch = pitch>>2; uint32 rgb[3]; RGBAUNPACK_10bit(srcImage[y*processingPitch + x], rgb); //todo rgb[1] = rgb[2] = 0; srcImage[y*processingPitch + x] = RGBAPACK_10bit(rgb); }
159c8924bc8eb93780e04ce6a767d113f5d55571.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "cudaProcessFrame.h" #define MUL(x,y) (x*y) __device__ void YUV2RGB(uint32 *yuvi, float *rgb) { float luma, chromaCb, chromaCr; luma = (float)yuvi[0]; chromaCb = (float)((int32)yuvi[1] - 512.0f); chromaCr = (float)((int32)yuvi[2] - 512.0f); rgb[0] = MUL(luma, 1.1644) + MUL(chromaCb, 0) + MUL(chromaCr, 1.5960); rgb[1] = MUL(luma, 1.1644) + MUL(chromaCb, -0.3918) + MUL(chromaCr, -0.8130); rgb[2] = MUL(luma, 1.1644) + MUL(chromaCb, 2.0172) + MUL(chromaCr, 0); } __device__ void RGB2YUV(uint32 *rgb, float *yuv) { float r = (float)rgb[0]; float g = (float)rgb[1]; float b = (float)rgb[2]; yuv[0] = MUL(r, 0.2568) + MUL(g, 0.5041) + MUL(b, 0.0979); yuv[1]= MUL(r, -0.1482) + MUL(g, -0.2910) + MUL(b, 0.4392) + 512; yuv[2] = MUL(r, 0.4392) + MUL(g, -0.3678) + MUL(b, -0.0714) + 512; } __device__ uint32 RGBAPACK_10bit(uint32* irgb) { float rgb[3]; uint32 ARGBpixel = 0; rgb[0] = min(max((float)irgb[0], 0.0f), 1023.f); rgb[1] = min(max((float)irgb[1], 0.0f), 1023.f); rgb[2] = min(max((float)irgb[2], 0.0f), 1023.f); ARGBpixel = (((uint32)rgb[2] >> 2) | (((uint32)rgb[1] >> 2) << 8) | (((uint32)rgb[0] >> 2) << 16) | ((uint32)0xff<< 24)); return ARGBpixel; } __device__ uint32 RGBAPACK_10bit(float* rgb) { uint32 ARGBpixel = 0; rgb[0] = min(max(rgb[0], 0.0f), 1023.f); rgb[1] = min(max(rgb[1], 0.0f), 1023.f); rgb[2] = min(max(rgb[2], 0.0f), 1023.f); ARGBpixel = (((uint32)rgb[2] >> 2) | (((uint32)rgb[1] >> 2) << 8) | (((uint32)rgb[0] >> 2) << 16) | ((uint32)0xff<< 24)); return ARGBpixel; } __device__ void RGBAUNPACK_10bit(uint32 pixel, uint32* rgb) { rgb[2] = (pixel & 0xFF) << 2; rgb[1] = ((pixel>>8) & 0xFF) << 2; rgb[0] = ((pixel>>16) & 0xFF) << 2; } extern "C" __global__ void NV12ToARGBdrvapi(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { // process 2 pixels per thread int32 x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x+1 >= width || y >= height) return; uint32 processingPitch = nSourcePitch; uint32 dstImagePitch = nDestPitch >> 2; uint8 *srcImageU8 = (uint8 *)srcImage; // Read 2 Y components at a time uint32 yuvi[6]; yuvi[0] = (srcImageU8[y * processingPitch + x ]) << 2; yuvi[3] = (srcImageU8[y * processingPitch + x + 1]) << 2; uint32 chromaOffset = processingPitch * height; int32 y_chroma = y >> 1; if (y & 1) // odd scanline { uint32 chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ]; uint32 chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1]; if (y_chroma < ((height >> 1) - 1)) // interpolate vertically { chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1; chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1; } yuvi[1] = yuvi[4] = chromaCb << 2; yuvi[2] = yuvi[5] = chromaCr << 2; } else { yuvi[1] = yuvi[4] = (uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << 2; yuvi[2] = yuvi[5] = (uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << 2; } // YUV to RGB Transformation 
conversion float rgb[6]; YUV2RGB(&yuvi[0], &rgb[0]); YUV2RGB(&yuvi[3], &rgb[3]); // save to dest dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(&rgb[0]); dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(&rgb[3]); } extern "C" __global__ void ARGBToNv12drvapi(uint32 *srcImage, size_t nSourcePitch, uint32 *dstImage, size_t nDestPitch, uint32 width, uint32 height) { int32 x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1); int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x+1 >= width || y >= height) return; uint32 processingPitch = nSourcePitch>>2; uint8 *dstImageU8 = (uint8 *)dstImage; uint32 rgb[6]; float yuv[6]; // Clamp the results to RGBA RGBAUNPACK_10bit(srcImage[y * processingPitch + x ], &rgb[0]); RGBAUNPACK_10bit(srcImage[y * processingPitch + x + 1 ], &rgb[3]); // YUV to RGB Transformation conversion RGB2YUV(&rgb[0], &yuv[0]); RGB2YUV(&rgb[3], &yuv[3]); yuv[0] = min(max(yuv[0]/4, 0.0f), 255.f); yuv[1] = min(max((yuv[1])/4, 0.0f), 255.f); yuv[2] = min(max((yuv[2])/4, 0.0f), 255.f); yuv[3] = min(max(yuv[3]/4, 0.0f), 255.f); yuv[4] = min(max((yuv[4])/4, 0.0f), 255.f); yuv[5] = min(max((yuv[5])/4, 0.0f), 255.f); uint32 dstImagePitch = nDestPitch; dstImageU8[y * dstImagePitch + x] = ((uint32)yuv[0]); dstImageU8[y * dstImagePitch + x + 1] = ((uint32)yuv[3]); if (y & 1){ } else { int32 y_chroma = y >> 1; uint32 chromaOffset = dstImagePitch* height; dstImageU8[chromaOffset + y_chroma * dstImagePitch + x] = (uint8)((uint32)yuv[1]); dstImageU8[chromaOffset + y_chroma * dstImagePitch + x+1] = (uint8)((uint32)yuv[2]); } } extern "C" __global__ void ARGBpostprocess(uint32 *srcImage, size_t pitch, uint32 width, uint32 height) { int32 x = blockIdx.x * blockDim.x + threadIdx.x; int32 y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint32 processingPitch = pitch>>2; uint32 rgb[3]; RGBAUNPACK_10bit(srcImage[y*processingPitch + x], rgb); //todo rgb[1] = rgb[2] = 0; srcImage[y*processingPitch + x] = RGBAPACK_10bit(rgb); }
6c17acda1d30b0df591d83241a94e48af6e60700.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdlib.h> #include <hiprand/hiprand_kernel.h> #include <ctime> #define thrdperblc 128 #define POPULATION 10000 __global__ void crossover(char* population) { int i, first_start, second_start, first_end, second_end, iterator,endofpopulation; int tId = blockIdx.x * thrdperblc + threadIdx.x; if (tId <= POPULATION / 2) { iterator = 0; first_start = tId * 11 * 2; first_end = first_start + 7; endofpopulation = (tId / 2 * 11) + POPULATION * 11; for (i = first_start; i <first_end ;i++) { population[ endofpopulation + iterator] = population[i]; iterator++; } second_start = first_start + 18; second_end = second_start + 4; for (i = second_start; i < second_end ; i++) { population[endofpopulation + iterator] = population[i]; iterator++; } } } __global__ void mutatation(char* population) { int i,start,end; int tId = blockIdx.x * thrdperblc + threadIdx.x; if (tId <= POPULATION / 2) { hiprandState_t state; char letters[] = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"; start = POPULATION * 11 + (tId * 11); end = start + 11; for (i = start; i < end ; i++) { hiprand_init((unsigned long long)clock() + tId + 1, 0, 0, &state); int randnum = (int)( 834 * hiprand_uniform_double(&state)); if ((int)randnum % 10 > 7) { population[i] = letters[randnum % 27]; } } } } int fitness(char* chr) { int i = 0; int fitness_val = 0; char word[] = "HELLO WORLD"; for (i = 0; i < 11; i++) { if(chr[i] == word[i]) fitness_val += 5; else { fitness_val -= abs(chr[i] - word[i]); } } return fitness_val; } int compare(const void* first, const void* second) { char* f = ((char*)first); char* s = ((char*)second); int temp = fitness(s) - fitness(f); return temp; } void init_population(char* population) { char letters[] = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"; int i; for (i = 0; i < POPULATION * 11; i++) { population[i] = letters[rand() % 27]; } } int main() { int i; float time; hipEvent_t start, stop; char* population = (char*)malloc(sizeof(char) * (POPULATION + POPULATION / 2) * 11 ); char* d_population; hipMalloc(&d_population, sizeof(char) * (POPULATION + POPULATION / 2) * 11 ); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); init_population(population); qsort(population, POPULATION, sizeof(char) * 11, compare); while (fitness(population) != 55) { hipMemcpy(d_population, population, sizeof(char) * 11 * (POPULATION), hipMemcpyHostToDevice); crossover << < POPULATION/ 2 / thrdperblc + 1, thrdperblc >> > (d_population); mutatation << < POPULATION / 2 / thrdperblc + 1, thrdperblc >> > (d_population); hipMemcpy(population, d_population, sizeof(char) * 11 * (POPULATION + POPULATION/2), hipMemcpyDeviceToHost); qsort(population, POPULATION + POPULATION / 2, sizeof(char) * 11, compare); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); for (i = 0; i < 11; i++) printf("%c", population[i]); printf("\n"); printf("execution time is : %f miliseconds \n", time); hipFree(d_population); free(population); return 0; }
6c17acda1d30b0df591d83241a94e48af6e60700.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdlib.h> #include <curand_kernel.h> #include <ctime> #define thrdperblc 128 #define POPULATION 10000 __global__ void crossover(char* population) { int i, first_start, second_start, first_end, second_end, iterator,endofpopulation; int tId = blockIdx.x * thrdperblc + threadIdx.x; if (tId <= POPULATION / 2) { iterator = 0; first_start = tId * 11 * 2; first_end = first_start + 7; endofpopulation = (tId / 2 * 11) + POPULATION * 11; for (i = first_start; i <first_end ;i++) { population[ endofpopulation + iterator] = population[i]; iterator++; } second_start = first_start + 18; second_end = second_start + 4; for (i = second_start; i < second_end ; i++) { population[endofpopulation + iterator] = population[i]; iterator++; } } } __global__ void mutatation(char* population) { int i,start,end; int tId = blockIdx.x * thrdperblc + threadIdx.x; if (tId <= POPULATION / 2) { curandState state; char letters[] = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"; start = POPULATION * 11 + (tId * 11); end = start + 11; for (i = start; i < end ; i++) { curand_init((unsigned long long)clock() + tId + 1, 0, 0, &state); int randnum = (int)( 834 * curand_uniform_double(&state)); if ((int)randnum % 10 > 7) { population[i] = letters[randnum % 27]; } } } } int fitness(char* chr) { int i = 0; int fitness_val = 0; char word[] = "HELLO WORLD"; for (i = 0; i < 11; i++) { if(chr[i] == word[i]) fitness_val += 5; else { fitness_val -= abs(chr[i] - word[i]); } } return fitness_val; } int compare(const void* first, const void* second) { char* f = ((char*)first); char* s = ((char*)second); int temp = fitness(s) - fitness(f); return temp; } void init_population(char* population) { char letters[] = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"; int i; for (i = 0; i < POPULATION * 11; i++) { population[i] = letters[rand() % 27]; } } int main() { int i; float time; cudaEvent_t start, stop; char* population = (char*)malloc(sizeof(char) * (POPULATION + POPULATION / 2) * 11 ); char* d_population; cudaMalloc(&d_population, sizeof(char) * (POPULATION + POPULATION / 2) * 11 ); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); init_population(population); qsort(population, POPULATION, sizeof(char) * 11, compare); while (fitness(population) != 55) { cudaMemcpy(d_population, population, sizeof(char) * 11 * (POPULATION), cudaMemcpyHostToDevice); crossover << < POPULATION/ 2 / thrdperblc + 1, thrdperblc >> > (d_population); mutatation << < POPULATION / 2 / thrdperblc + 1, thrdperblc >> > (d_population); cudaMemcpy(population, d_population, sizeof(char) * 11 * (POPULATION + POPULATION/2), cudaMemcpyDeviceToHost); qsort(population, POPULATION + POPULATION / 2, sizeof(char) * 11, compare); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); for (i = 0; i < 11; i++) printf("%c", population[i]); printf("\n"); printf("execution time is : %f miliseconds \n", time); cudaFree(d_population); free(population); return 0; }
39a811e4aa966aea4269a38ab2b8de991a739618.hip
// !!! This is a file automatically generated by hipify!!! #include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" //#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo //#include <Device/Util/Timer.cuh> //xlib::Timer #include <string> #include <algorithm> //std:.generate using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; using hornet::print; //using hornet::generateBatchData; //#define RANDOM void deleteBatch(HornetGPU &hornet, vert_t * src, vert_t * dst, const int batch_size, const bool print_debug) { UpdatePtr ptr(batch_size, src, dst); Update batch_update(ptr); if (print_debug) { batch_update.print(); std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } hornet.erase(batch_update); if (print_debug) { std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } } void deleteBatchTest(HornetGPU &hornet, graph::GraphStd<vert_t, eoff_t> &graph, int batch_size, const bool print_debug) { #ifndef RANDOM vert_t batch_src[] = {1, 5, 2, 4}; vert_t batch_dst[] = {2, 4, 1, 5}; batch_size = 4; #else vert_t* batch_src, *batch_dst; host::allocatePageLocked(batch_src, batch_size); host::allocatePageLocked(batch_dst, batch_size); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT); #endif deleteBatch(hornet, batch_src, batch_dst, batch_size, print_debug); #ifndef RANDOM #else host::freePageLocked(batch_src, batch_dst); #endif } int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::selectRandom(init_coo, batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.erase(batch_update); auto inst_coo = hornet_gpu.getCOO(true); inst_coo.append(randomBatch); inst_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = ::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; #if defined(RMM_WRAPPER) gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default 
value), RMM currently assigns half the device memory. {//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. #endif ret = exec(argc, argv); #if defined(RMM_WRAPPER) }//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. gpu::finalizeRMMPoolAllocation(); #endif return ret; }
39a811e4aa966aea4269a38ab2b8de991a739618.cu
#include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" //#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo //#include <Device/Util/Timer.cuh> //xlib::Timer #include <string> #include <algorithm> //std:.generate using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; using hornet::print; //using hornet::generateBatchData; //#define RANDOM void deleteBatch(HornetGPU &hornet, vert_t * src, vert_t * dst, const int batch_size, const bool print_debug) { UpdatePtr ptr(batch_size, src, dst); Update batch_update(ptr); if (print_debug) { batch_update.print(); std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } hornet.erase(batch_update); if (print_debug) { std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } } void deleteBatchTest(HornetGPU &hornet, graph::GraphStd<vert_t, eoff_t> &graph, int batch_size, const bool print_debug) { #ifndef RANDOM vert_t batch_src[] = {1, 5, 2, 4}; vert_t batch_dst[] = {2, 4, 1, 5}; batch_size = 4; #else vert_t* batch_src, *batch_dst; host::allocatePageLocked(batch_src, batch_size); host::allocatePageLocked(batch_dst, batch_size); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT); #endif deleteBatch(hornet, batch_src, batch_dst, batch_size, print_debug); #ifndef RANDOM #else host::freePageLocked(batch_src, batch_dst); #endif } int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::selectRandom(init_coo, batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.erase(batch_update); auto inst_coo = hornet_gpu.getCOO(true); inst_coo.append(randomBatch); inst_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = std::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; #if defined(RMM_WRAPPER) gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory. 
{//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. #endif ret = exec(argc, argv); #if defined(RMM_WRAPPER) }//scoping technique to make sure that gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. gpu::finalizeRMMPoolAllocation(); #endif return ret; }
54e2090b584d1bfae238bfe238820ff24e320c19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "convolutionSeparable15.hpp" #include "defines.hpp" #define ROW_BLOCK_DIM_X15 32 #define ROW_BLOCK_DIM_Y15 16 __constant__ float c_Kernel15[256]; void setConvolutionKernel15(float* h_Kernel, int k_length) { hipMemcpyToSymbol(c_Kernel15, h_Kernel, k_length * sizeof(float)); } __global__ void rowConvolutionFilter15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ __shared__ float sData[ROW_BLOCK_DIM_Y15][(ROW_RESULT_STEP + 2*ROW_HALO_STEP) * ROW_BLOCK_DIM_X15]; //offset to left halo edge const int baseX = (blockIdx.x * ROW_RESULT_STEP) * ROW_BLOCK_DIM_X15 - ROW_HALO_STEP * ROW_BLOCK_DIM_X15 + threadIdx.x; const int baseY = blockIdx.y * ROW_BLOCK_DIM_Y15 + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //load main data #pragma unroll for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) < imageW ? d_Src[i*ROW_BLOCK_DIM_X15] : 0; } //load left halo #pragma unroll for (int i = 0; i < ROW_HALO_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) >= 0 ? d_Src[i*ROW_BLOCK_DIM_X15] : 0; } //load right halo #pragma unroll for (int i = ROW_HALO_STEP + ROW_RESULT_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP + ROW_HALO_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) < imageW ? d_Src[i * ROW_BLOCK_DIM_X15] : 0; } __syncthreads(); if (baseY >= imageH) { return; } //convolve #pragma unroll for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP+ROW_RESULT_STEP; i++){ if(baseX + i * ROW_BLOCK_DIM_X15 < imageW){ float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS15; j <= KERNEL_RADIUS15; j++) { sum += c_Kernel15[KERNEL_RADIUS15 - j] * sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15 + j]; } d_Dst[i*ROW_BLOCK_DIM_X15] = sum; } } } #define COL_BLOCK_DIM_X15 16 #define COL_BLOCK_DIM_Y15 32 __global__ void colConvolutionFilter15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float sData[COL_BLOCK_DIM_X15][(COL_RESULT_STEP + 2 * COL_HALO_STEP) * COL_BLOCK_DIM_Y15 + 1]; //+1 to avoid shared mem bank conflicts const int baseX = blockIdx.x * COL_BLOCK_DIM_X15 + threadIdx.x; const int baseY = blockIdx.y * COL_BLOCK_DIM_Y15 * COL_RESULT_STEP - COL_HALO_STEP * COL_BLOCK_DIM_Y15 + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //load main data #pragma unroll for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) < imageH ? d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } //load top halo #pragma unroll for (int i = 0; i < COL_HALO_STEP; i ++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) >= 0 ? d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } //load bottom halo #pragma unroll for (int i = COL_HALO_STEP + COL_RESULT_STEP; i < COL_HALO_STEP + COL_RESULT_STEP + COL_HALO_STEP; i++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) < imageH ? 
d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } __syncthreads(); if (baseX >= imageW) { return; } //convolve #pragma unroll for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) { if ((baseY + i * COL_BLOCK_DIM_Y15) < imageH) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS15; j <= KERNEL_RADIUS15; j++) { sum += c_Kernel15[KERNEL_RADIUS15 - j] * sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15 + j]; } d_Dst[i * COL_BLOCK_DIM_Y15 * pitch] = sum; } } } void rowConvolve15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ dim3 blocks((imageW + (ROW_RESULT_STEP * ROW_BLOCK_DIM_X15) - 1) / (ROW_RESULT_STEP * ROW_BLOCK_DIM_X15), (imageH + ROW_BLOCK_DIM_Y15 - 1) / ROW_BLOCK_DIM_Y15); dim3 threads(ROW_BLOCK_DIM_X15, ROW_BLOCK_DIM_Y15); hipLaunchKernelGGL(( rowConvolutionFilter15), dim3(blocks),dim3(threads), 0, 0, d_Dst,d_Src,imageW,imageH,pitch); } void colConvolve15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ dim3 blocks((imageW + COL_BLOCK_DIM_X15 - 1) / COL_BLOCK_DIM_X15, (imageH + COL_BLOCK_DIM_Y15 * COL_RESULT_STEP - 1) / (COL_BLOCK_DIM_Y15 * COL_RESULT_STEP)); dim3 threads(COL_BLOCK_DIM_X15, COL_BLOCK_DIM_Y15); hipLaunchKernelGGL(( colConvolutionFilter15), dim3(blocks),dim3(threads), 0, 0, d_Dst,d_Src,imageW,imageH,pitch); }
54e2090b584d1bfae238bfe238820ff24e320c19.cu
#include "convolutionSeparable15.hpp" #include "defines.hpp" #define ROW_BLOCK_DIM_X15 32 #define ROW_BLOCK_DIM_Y15 16 __constant__ float c_Kernel15[256]; void setConvolutionKernel15(float* h_Kernel, int k_length) { cudaMemcpyToSymbol(c_Kernel15, h_Kernel, k_length * sizeof(float)); } __global__ void rowConvolutionFilter15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ __shared__ float sData[ROW_BLOCK_DIM_Y15][(ROW_RESULT_STEP + 2*ROW_HALO_STEP) * ROW_BLOCK_DIM_X15]; //offset to left halo edge const int baseX = (blockIdx.x * ROW_RESULT_STEP) * ROW_BLOCK_DIM_X15 - ROW_HALO_STEP * ROW_BLOCK_DIM_X15 + threadIdx.x; const int baseY = blockIdx.y * ROW_BLOCK_DIM_Y15 + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //load main data #pragma unroll for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) < imageW ? d_Src[i*ROW_BLOCK_DIM_X15] : 0; } //load left halo #pragma unroll for (int i = 0; i < ROW_HALO_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) >= 0 ? d_Src[i*ROW_BLOCK_DIM_X15] : 0; } //load right halo #pragma unroll for (int i = ROW_HALO_STEP + ROW_RESULT_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP + ROW_HALO_STEP; i++) { sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15] = (baseX + i * ROW_BLOCK_DIM_X15) < imageW ? d_Src[i * ROW_BLOCK_DIM_X15] : 0; } __syncthreads(); if (baseY >= imageH) { return; } //convolve #pragma unroll for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP+ROW_RESULT_STEP; i++){ if(baseX + i * ROW_BLOCK_DIM_X15 < imageW){ float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS15; j <= KERNEL_RADIUS15; j++) { sum += c_Kernel15[KERNEL_RADIUS15 - j] * sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X15 + j]; } d_Dst[i*ROW_BLOCK_DIM_X15] = sum; } } } #define COL_BLOCK_DIM_X15 16 #define COL_BLOCK_DIM_Y15 32 __global__ void colConvolutionFilter15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float sData[COL_BLOCK_DIM_X15][(COL_RESULT_STEP + 2 * COL_HALO_STEP) * COL_BLOCK_DIM_Y15 + 1]; //+1 to avoid shared mem bank conflicts const int baseX = blockIdx.x * COL_BLOCK_DIM_X15 + threadIdx.x; const int baseY = blockIdx.y * COL_BLOCK_DIM_Y15 * COL_RESULT_STEP - COL_HALO_STEP * COL_BLOCK_DIM_Y15 + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //load main data #pragma unroll for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) < imageH ? d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } //load top halo #pragma unroll for (int i = 0; i < COL_HALO_STEP; i ++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) >= 0 ? d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } //load bottom halo #pragma unroll for (int i = COL_HALO_STEP + COL_RESULT_STEP; i < COL_HALO_STEP + COL_RESULT_STEP + COL_HALO_STEP; i++) { sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15] = (baseY + i * COL_BLOCK_DIM_Y15) < imageH ? 
d_Src[i * COL_BLOCK_DIM_Y15 * pitch] : 0; } __syncthreads(); if (baseX >= imageW) { return; } //convolve #pragma unroll for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) { if ((baseY + i * COL_BLOCK_DIM_Y15) < imageH) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS15; j <= KERNEL_RADIUS15; j++) { sum += c_Kernel15[KERNEL_RADIUS15 - j] * sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y15 + j]; } d_Dst[i * COL_BLOCK_DIM_Y15 * pitch] = sum; } } } void rowConvolve15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ dim3 blocks((imageW + (ROW_RESULT_STEP * ROW_BLOCK_DIM_X15) - 1) / (ROW_RESULT_STEP * ROW_BLOCK_DIM_X15), (imageH + ROW_BLOCK_DIM_Y15 - 1) / ROW_BLOCK_DIM_Y15); dim3 threads(ROW_BLOCK_DIM_X15, ROW_BLOCK_DIM_Y15); rowConvolutionFilter15<<<blocks,threads>>>(d_Dst,d_Src,imageW,imageH,pitch); } void colConvolve15( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ){ dim3 blocks((imageW + COL_BLOCK_DIM_X15 - 1) / COL_BLOCK_DIM_X15, (imageH + COL_BLOCK_DIM_Y15 * COL_RESULT_STEP - 1) / (COL_BLOCK_DIM_Y15 * COL_RESULT_STEP)); dim3 threads(COL_BLOCK_DIM_X15, COL_BLOCK_DIM_Y15); colConvolutionFilter15<<<blocks,threads>>>(d_Dst,d_Src,imageW,imageH,pitch); }
478548414311aa4e7fb6c19e01677f70905d4628.hip
// !!! This is a file automatically generated by hipify!!!
// Version 1.0.0 CUDA-C: Omega
// Dr. Gonzalo Damián Quiroga
// Universidad Nacional de Córdoba
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <ctype.h>
#include <time.h>
#include "../ODEs/rhs.h"
__global__
#include "../solvers/rk4.cu"
__global__
#include "../solvers/dp45.cu"
__global__
#include "../solvers/rkf78.cu"
__device__
#include "../ODEs/rhs.c"
478548414311aa4e7fb6c19e01677f70905d4628.cu
// Version 1.0.0 CUDA-C: Omega
// Dr. Gonzalo Damián Quiroga
// Universidad Nacional de Córdoba
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <ctype.h>
#include <time.h>
#include "../ODEs/rhs.h"
__global__
#include "../solvers/rk4.cu"
__global__
#include "../solvers/dp45.cu"
__global__
#include "../solvers/rkf78.cu"
__device__
#include "../ODEs/rhs.c"
7c3e2a721398667d4da44fdf5673c7139cda1f66.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaType.h"
#include "Indice2D.h"
#include "IndiceTools.h"
#include "ColorTools.h"
#include "HeatTransfertMath.h"

__global__ void diffuse(float* ptrDevImageInput, float* ptrDevImageOutput, unsigned int w, unsigned int h, float propSpeed);
__global__ void crush(float* ptrDevImageHeater, float* ptrDevImage, unsigned int size);
__global__ void toScreen(float* ptrDevImage, uchar4* ptrDevPixels, unsigned int size);

__global__ void diffuse(float* ptrDevImageInput, float* ptrDevImageOutput, unsigned int w, unsigned int h, float propSpeed)
{
    const int NB_THREADS = Indice2D::nbThread();
    const int TID = Indice2D::tid();
    HeatTransfertMath math;

    unsigned int wh = w*h;
    unsigned int s = TID;
    while ( s < wh )
    {
        int i, j;
        IndiceTools::toIJ(s, w, &i, &j);

        if (i > 0 && i < (h - 1) && j > 0 && j < (w - 1))
        {
            float neighborsHeat[4];
            neighborsHeat[0] = ptrDevImageInput[IndiceTools::toS(w, i - 1, j)];
            neighborsHeat[1] = ptrDevImageInput[IndiceTools::toS(w, i + 1, j)];
            neighborsHeat[2] = ptrDevImageInput[IndiceTools::toS(w, i, j - 1)];
            neighborsHeat[3] = ptrDevImageInput[IndiceTools::toS(w, i, j + 1)];

            ptrDevImageOutput[s] = math.computeHeat(ptrDevImageInput[s], neighborsHeat, 4, propSpeed);
        }
        else
        {
            ptrDevImageOutput[s] = ptrDevImageInput[s];
        }

        s += NB_THREADS;
    }
}

__global__ void crush(float* ptrDevImageHeater, float* ptrDevImage, unsigned int size)
{
    const int NB_THREADS = Indice2D::nbThread();
    const int TID = Indice2D::tid();

    unsigned int s = TID;
    while ( s < size )
    {
        if (ptrDevImageHeater[s] > 0.0)
        {
            ptrDevImage[s] = ptrDevImageHeater[s];
        }

        s += NB_THREADS;
    }
}

__global__ void toScreen(float* ptrDevImage, uchar4* ptrDevPixels, unsigned int size)
{
    const int NB_THREADS = Indice2D::nbThread();
    const int TID = Indice2D::tid();

    unsigned int s = TID;
    while ( s < size )
    {
        float hue = 0.7 - ptrDevImage[s] * 0.7;
        ColorTools::HSB_TO_RVB(hue, 1, 1, &ptrDevPixels[s]);
        ptrDevPixels[s].w = 255;

        s += NB_THREADS;
    }
}
7c3e2a721398667d4da44fdf5673c7139cda1f66.cu
#include "cudaType.h" #include "Indice2D.h" #include "IndiceTools.h" #include "ColorTools.h" #include "HeatTransfertMath.h" __global__ void diffuse(float* ptrDevImageInput, float* ptrDevImageOutput, unsigned int w, unsigned int h, float propSpeed); __global__ void crush(float* ptrDevImageHeater, float* ptrDevImage, unsigned int size); __global__ void toScreen(float* ptrDevImage, uchar4* ptrDevPixels, unsigned int size); __global__ void diffuse(float* ptrDevImageInput, float* ptrDevImageOutput, unsigned int w, unsigned int h, float propSpeed) { const int NB_THREADS = Indice2D::nbThread(); const int TID = Indice2D::tid(); HeatTransfertMath math; unsigned int wh = w*h; unsigned int s = TID; while ( s < wh ) { int i, j; IndiceTools::toIJ(s, w, &i, &j); if (i > 0 && i < (h - 1) && j > 0 && j < (w - 1)) { float neighborsHeat[4]; neighborsHeat[0] = ptrDevImageInput[IndiceTools::toS(w, i - 1, j)]; neighborsHeat[1] = ptrDevImageInput[IndiceTools::toS(w, i + 1, j)]; neighborsHeat[2] = ptrDevImageInput[IndiceTools::toS(w, i, j - 1)]; neighborsHeat[3] = ptrDevImageInput[IndiceTools::toS(w, i, j + 1)]; ptrDevImageOutput[s] = math.computeHeat(ptrDevImageInput[s], neighborsHeat, 4, propSpeed); } else { ptrDevImageOutput[s] = ptrDevImageInput[s]; } s += NB_THREADS; } } __global__ void crush(float* ptrDevImageHeater, float* ptrDevImage, unsigned int size) { const int NB_THREADS = Indice2D::nbThread(); const int TID = Indice2D::tid(); unsigned int s = TID; while ( s < size ) { if (ptrDevImageHeater[s] > 0.0) { ptrDevImage[s] = ptrDevImageHeater[s]; } s += NB_THREADS; } } __global__ void toScreen(float* ptrDevImage, uchar4* ptrDevPixels, unsigned int size) { const int NB_THREADS = Indice2D::nbThread(); const int TID = Indice2D::tid(); unsigned int s = TID; while ( s < size ) { float hue = 0.7 - ptrDevImage[s] * 0.7; ColorTools::HSB_TO_RVB(hue, 1, 1, &ptrDevPixels[s]); ptrDevPixels[s].w = 255; s += NB_THREADS; } }
f157a9027c24508ced400be20bb27340dbda382b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MP 5 Scan // Given a list (lst) of length n // Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]} // Due Tuesday, January 22, 2013 at 11:59 p.m. PST #include <wb.h> #define BLOCK_SIZE 128 //@@ You can change this #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) __global__ void scan(float * input, float * output, int len) { //@@ Modify the body of this function to complete the functionality of //@@ the scan on the device //@@ You may need multiple kernel calls; write your kernels before this //@@ function and call them from here __shared__ float scan_array[BLOCK_SIZE]; int global_id = threadIdx.x + blockDim.x * blockIdx.x; if (global_id < len) scan_array[threadIdx.x] = input[global_id]; else scan_array[threadIdx.x] = 0; __syncthreads(); int stride = 1; while (stride < BLOCK_SIZE) { int index = (threadIdx.x + 1) * stride * 2 - 1; if (index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride << 1; __syncthreads(); } for(int stride = BLOCK_SIZE >> 1; stride > 0; stride = stride >> 1) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; } __syncthreads(); if (global_id < len) output[global_id] = scan_array[threadIdx.x]; if (global_id < BLOCK_SIZE) { __syncthreads(); for (int block_idx = 1; block_idx <= (len / BLOCK_SIZE) ; ++block_idx) { float offset = output[block_idx * BLOCK_SIZE - 1]; if ((threadIdx.x + block_idx * blockDim.x) < len) output[threadIdx.x + block_idx * blockDim.x] += offset; __syncthreads(); } } } int main(int argc, char ** argv) { wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numElements; // number of elements in the list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float*) malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float))); wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Clearing output memory."); wbCheck(hipMemset(deviceOutput, 0, numElements*sizeof(float))); wbTime_stop(GPU, "Clearing output memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 DimGrid((numElements -1) / BLOCK_SIZE + 1 , 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the deivce hipLaunchKernelGGL(( scan), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput, deviceOutput, numElements); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), 
hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); hipFree(deviceInput); hipFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
f157a9027c24508ced400be20bb27340dbda382b.cu
// MP 5 Scan // Given a list (lst) of length n // Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]} // Due Tuesday, January 22, 2013 at 11:59 p.m. PST #include <wb.h> #define BLOCK_SIZE 128 //@@ You can change this #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) __global__ void scan(float * input, float * output, int len) { //@@ Modify the body of this function to complete the functionality of //@@ the scan on the device //@@ You may need multiple kernel calls; write your kernels before this //@@ function and call them from here __shared__ float scan_array[BLOCK_SIZE]; int global_id = threadIdx.x + blockDim.x * blockIdx.x; if (global_id < len) scan_array[threadIdx.x] = input[global_id]; else scan_array[threadIdx.x] = 0; __syncthreads(); int stride = 1; while (stride < BLOCK_SIZE) { int index = (threadIdx.x + 1) * stride * 2 - 1; if (index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride << 1; __syncthreads(); } for(int stride = BLOCK_SIZE >> 1; stride > 0; stride = stride >> 1) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; } __syncthreads(); if (global_id < len) output[global_id] = scan_array[threadIdx.x]; if (global_id < BLOCK_SIZE) { __syncthreads(); for (int block_idx = 1; block_idx <= (len / BLOCK_SIZE) ; ++block_idx) { float offset = output[block_idx * BLOCK_SIZE - 1]; if ((threadIdx.x + block_idx * blockDim.x) < len) output[threadIdx.x + block_idx * blockDim.x] += offset; __syncthreads(); } } } int main(int argc, char ** argv) { wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numElements; // number of elements in the list args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float*) malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float))); wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Clearing output memory."); wbCheck(cudaMemset(deviceOutput, 0, numElements*sizeof(float))); wbTime_stop(GPU, "Clearing output memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 DimGrid((numElements -1) / BLOCK_SIZE + 1 , 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the deivce scan<<<DimGrid,DimBlock>>>(deviceInput, deviceOutput, numElements); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU 
Memory"); cudaFree(deviceInput); cudaFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
65a3cf803c7716ef533fbb904f3c99a092f2e5d3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "common.h" #include "params.h" #include "bnd.h" #include "GPU.h" #include "Atomic.h" #ifdef WMPI #include "communication.h" #endif extern "C" void Allocation(void); extern "C" void cuAllocation(void); //************************************************************************* //************************************************************************* #define CUERR() printf("\n %s \n",hipGetErrorString(hipGetLastError())) //************************************************************************* //************************************************************************* void Allocation(void) { egy= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); flx= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3,sizeof(float)); // dedd= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3*3,sizeof(float)); egy_new= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); flx_new= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3,sizeof(float)); #ifndef SDISCRETE src0= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); #else src0= (float *) calloc(nsource,sizeof(float)); src0pos= (int *) calloc(nsource*3,sizeof(int)); #endif xion= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); density= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); temperature= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); #ifdef WMPI buff=(float*) calloc(NGRP*NBUFF*NBUFF*4,sizeof(float)); // 4 for EGY + FLUX // HERE WE ASSUME THAT NCELLZ >=NCELLX,Y #endif } void cuAllocation(void) { hipMalloc((void**)&cuegy,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float)); //1 hipMalloc((void**)&cuflx,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3)*sizeof(float)); //3 hipMalloc((void**)&cuegy_new,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 hipMalloc((void**)&cuflx_new,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3)*sizeof(float)); //3 #ifdef SDISCRETE hipMalloc((void**)&cusrc0,nsource*sizeof(float));//1 hipMalloc((void**)&cusrc0pos,3*nsource*sizeof(int));//1 #else hipMalloc((void**)&cusrc0,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 #endif hipMalloc((void**)&cuxion,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));// 1 hipMalloc((void**)&cudensity,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 hipMalloc((void**)&cutemperature,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 #ifdef WMPI hipMalloc((void**)&cubuff,NGRP*NBUFF*NBUFF*4*sizeof(float)); // 4 for EGY + FLUX // HERE WE ASSUME THAT NCELLZ >=NCELLX,Y #endif hipDeviceSynchronize(); //CUERR(); }
65a3cf803c7716ef533fbb904f3c99a092f2e5d3.cu
#include <stdio.h> #include <stdlib.h> #include "common.h" #include "params.h" #include "bnd.h" #include "GPU.h" #include "Atomic.h" #ifdef WMPI #include "communication.h" #endif extern "C" void Allocation(void); extern "C" void cuAllocation(void); //************************************************************************* //************************************************************************* #define CUERR() printf("\n %s \n",cudaGetErrorString(cudaGetLastError())) //************************************************************************* //************************************************************************* void Allocation(void) { egy= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); flx= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3,sizeof(float)); // dedd= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3*3,sizeof(float)); egy_new= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); flx_new= (float *) calloc(NGRP*(NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3,sizeof(float)); #ifndef SDISCRETE src0= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); #else src0= (float *) calloc(nsource,sizeof(float)); src0pos= (int *) calloc(nsource*3,sizeof(int)); #endif xion= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); density= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); temperature= (float *) calloc((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2),sizeof(float)); #ifdef WMPI buff=(float*) calloc(NGRP*NBUFF*NBUFF*4,sizeof(float)); // 4 for EGY + FLUX // HERE WE ASSUME THAT NCELLZ >=NCELLX,Y #endif } void cuAllocation(void) { cudaMalloc((void**)&cuegy,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float)); //1 cudaMalloc((void**)&cuflx,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3)*sizeof(float)); //3 cudaMalloc((void**)&cuegy_new,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 cudaMalloc((void**)&cuflx_new,NGRP*((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2)*3)*sizeof(float)); //3 #ifdef SDISCRETE cudaMalloc((void**)&cusrc0,nsource*sizeof(float));//1 cudaMalloc((void**)&cusrc0pos,3*nsource*sizeof(int));//1 #else cudaMalloc((void**)&cusrc0,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 #endif cudaMalloc((void**)&cuxion,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));// 1 cudaMalloc((void**)&cudensity,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 cudaMalloc((void**)&cutemperature,((NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2))*sizeof(float));//1 #ifdef WMPI cudaMalloc((void**)&cubuff,NGRP*NBUFF*NBUFF*4*sizeof(float)); // 4 for EGY + FLUX // HERE WE ASSUME THAT NCELLZ >=NCELLX,Y #endif cudaThreadSynchronize(); //CUERR(); }
355d0e69306a08f29b6012b029868c91286f449d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2020 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <GPUTreeShap/gpu_treeshap.h> #include <memory> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; bst_feature_t num_features; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr, bst_feature_t num_features) : d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {} __device__ float GetElement(size_t ridx, size_t fidx) const { // Binary search auto begin_ptr = d_data.begin() + d_row_ptr[ridx]; auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1]; if (end_ptr - begin_ptr == this->NumCols()) { // Bypass span check for dense data return d_data.data()[d_row_ptr[ridx] + fidx].fvalue; } common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; } XGBOOST_DEVICE size_t NumCols() const { return num_features; } }; struct SparsePageLoader { bool use_shared; SparsePageView data; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), data(data), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * data.num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = data.d_row_ptr[global_idx]; bst_uint elem_end = data.d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = data.d_data[elem_idx - entry_start]; smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetElement(size_t ridx, size_t fidx) const { if (use_shared) { return smem[threadIdx.x * data.num_features + fidx]; } else { return data.GetElement(ridx, fidx); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; template <typename Batch> struct DeviceAdapterLoader { Batch batch; bst_feature_t columns; float* smem; bool use_shared; using BatchT = Batch; XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetElement(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: // Need to lazily construct the vectors because GPU id is only known at runtime HostDeviceVector<RegTree::Node> nodes; HostDeviceVector<size_t> tree_segments; HostDeviceVector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(hipSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id)); auto& h_tree_segments = tree_segments.HostVector(); h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += 
model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id)); auto& h_nodes = nodes.HostVector(); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id)); auto& h_tree_group = tree_group.HostVector(); std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size()); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), num_features); dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, model_.nodes.DeviceSpan(), predictions->DeviceSpan().subspan(batch_offset), model_.tree_segments.DeviceSpan(), model_.tree_group.DeviceSpan(), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch, model_.nodes.DeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset), model_.tree_segments.DeviceSpan(), model_.tree_group.DeviceSpan(), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<SparsePage>()) { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.learner_model_param->num_output_group; } } else { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } } public: explicit GPUPredictor(GenericParameter 
const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; DeviceModel d_model; d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<std::shared_ptr<Adapter>>(x); CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m->DeviceIdx(); MetaInfo info; info.num_col_ = m->NumColumns(); info.num_row_ = m->NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m->NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, typename Loader::BatchT>, m->Value(), d_model.nodes.DeviceSpan(), out_preds->predictions.DeviceSpan(), d_model.tree_segments.DeviceSpan(), d_model.tree_group.DeviceSpan(), tree_begin, tree_end, m->NumColumns(), info.num_row_, 
entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) { this->DispatchedInplacePredict< data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) { this->DispatchedInplacePredict< data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { if (approximate) { LOG(FATAL) << "[Internal error]: " << __func__ << " approximate is not implemented in GPU Predictor."; } uint32_t real_ntree_limit = ntree_limit * model.learner_model_param->num_output_group; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } const int ngroup = model.learner_model_param->num_output_group; CHECK_NE(ngroup, 0); // allocate space for (number of features + bias) times the number of rows std::vector<bst_float>& contribs = *out_contribs; size_t contributions_columns = model.learner_model_param->num_feature + 1; // +1 for bias contribs.resize(p_fmat->Info().num_row_ * contributions_columns * model.learner_model_param->num_output_group); dh::TemporaryArray<float> phis(contribs.size(), 0.0); p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id); const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan(); float base_score = model.learner_model_param->base_score; auto d_phis = phis.data().get(); // Add the base margin term to last column dh::LaunchN( generic_param_->gpu_id, p_fmat->Info().num_row_ * model.learner_model_param->num_output_group, [=] __device__(size_t idx) { d_phis[(idx + 1) * contributions_columns - 1] = margin.empty() ? 
base_score : margin[idx]; }); const auto& paths = this->ExtractPaths(model, real_ntree_limit); for (auto& batch : p_fmat->GetBatches<SparsePage>()) { batch.data.SetDevice(generic_param_->gpu_id); batch.offset.SetDevice(generic_param_->gpu_id); SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), model.learner_model_param->num_feature); gpu_treeshap::GPUTreeShap( X, paths, ngroup, phis.data().get() + batch.base_rowid * contributions_columns); } dh::safe_cuda(hipMemcpyAsync(contribs.data(), phis.data().get(), sizeof(float) * phis.size(), hipMemcpyDefault)); } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::vector<gpu_treeshap::PathElement> ExtractPaths( const gbm::GBTreeModel& model, size_t tree_limit) { std::vector<gpu_treeshap::PathElement> paths; size_t path_idx = 0; CHECK_LE(tree_limit, model.trees.size()); for (auto i = 0ull; i < tree_limit; i++) { const auto& tree = *model.trees.at(i); size_t group = model.tree_info[i]; const auto& nodes = tree.GetNodes(); for (auto j = 0ull; j < nodes.size(); j++) { if (nodes[j].IsLeaf() && !nodes[j].IsDeleted()) { auto child = nodes[j]; float v = child.LeafValue(); size_t child_idx = j; const float inf = std::numeric_limits<float>::infinity(); while (!child.IsRoot()) { float child_cover = tree.Stat(child_idx).sum_hess; float parent_cover = tree.Stat(child.Parent()).sum_hess; float zero_fraction = child_cover / parent_cover; CHECK(zero_fraction >= 0.0 && zero_fraction <= 1.0); auto parent = nodes[child.Parent()]; CHECK(parent.LeftChild() == child_idx || parent.RightChild() == child_idx); bool is_left_path = parent.LeftChild() == child_idx; bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) || (parent.DefaultLeft() && is_left_path); float lower_bound = is_left_path ? -inf : parent.SplitCond(); float upper_bound = is_left_path ? 
parent.SplitCond() : inf; paths.emplace_back(path_idx, parent.SplitIndex(), group, lower_bound, upper_bound, is_missing_path, zero_fraction, v); child_idx = child.Parent(); child = parent; } // Root node has feature -1 paths.emplace_back(path_idx, -1, group, -inf, inf, false, 1.0, v); path_idx++; } } } return paths; } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
355d0e69306a08f29b6012b029868c91286f449d.cu
/*! * Copyright 2017-2020 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <GPUTreeShap/gpu_treeshap.h> #include <memory> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; bst_feature_t num_features; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr, bst_feature_t num_features) : d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {} __device__ float GetElement(size_t ridx, size_t fidx) const { // Binary search auto begin_ptr = d_data.begin() + d_row_ptr[ridx]; auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1]; if (end_ptr - begin_ptr == this->NumCols()) { // Bypass span check for dense data return d_data.data()[d_row_ptr[ridx] + fidx].fvalue; } common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; } XGBOOST_DEVICE size_t NumCols() const { return num_features; } }; struct SparsePageLoader { bool use_shared; SparsePageView data; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), data(data), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * data.num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = data.d_row_ptr[global_idx]; bst_uint elem_end = data.d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = data.d_data[elem_idx - entry_start]; smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetElement(size_t ridx, size_t fidx) const { if (use_shared) { return smem[threadIdx.x * data.num_features + fidx]; } else { return data.GetElement(ridx, fidx); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; template <typename Batch> struct DeviceAdapterLoader { Batch batch; bst_feature_t columns; float* smem; bool use_shared; using BatchT = Batch; XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetElement(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: // Need to lazily construct the vectors because GPU id is only known at runtime HostDeviceVector<RegTree::Node> nodes; HostDeviceVector<size_t> tree_segments; HostDeviceVector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(cudaSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id)); auto& h_tree_segments = tree_segments.HostVector(); h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += 
model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id)); auto& h_nodes = nodes.HostVector(); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id)); auto& h_tree_group = tree_group.HostVector(); std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size()); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), num_features); dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, model_.nodes.DeviceSpan(), predictions->DeviceSpan().subspan(batch_offset), model_.tree_segments.DeviceSpan(), model_.tree_group.DeviceSpan(), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch, model_.nodes.DeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset), model_.tree_segments.DeviceSpan(), model_.tree_group.DeviceSpan(), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<SparsePage>()) { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.learner_model_param->num_output_group; } } else { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } } public: explicit GPUPredictor(GenericParameter 
const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; DeviceModel d_model; d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<std::shared_ptr<Adapter>>(x); CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m->DeviceIdx(); MetaInfo info; info.num_col_ = m->NumColumns(); info.num_row_ = m->NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m->NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, typename Loader::BatchT>, m->Value(), d_model.nodes.DeviceSpan(), out_preds->predictions.DeviceSpan(), d_model.tree_segments.DeviceSpan(), d_model.tree_group.DeviceSpan(), tree_begin, tree_end, m->NumColumns(), info.num_row_, 
entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) { this->DispatchedInplacePredict< data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) { this->DispatchedInplacePredict< data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { if (approximate) { LOG(FATAL) << "[Internal error]: " << __func__ << " approximate is not implemented in GPU Predictor."; } uint32_t real_ntree_limit = ntree_limit * model.learner_model_param->num_output_group; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } const int ngroup = model.learner_model_param->num_output_group; CHECK_NE(ngroup, 0); // allocate space for (number of features + bias) times the number of rows std::vector<bst_float>& contribs = *out_contribs; size_t contributions_columns = model.learner_model_param->num_feature + 1; // +1 for bias contribs.resize(p_fmat->Info().num_row_ * contributions_columns * model.learner_model_param->num_output_group); dh::TemporaryArray<float> phis(contribs.size(), 0.0); p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id); const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan(); float base_score = model.learner_model_param->base_score; auto d_phis = phis.data().get(); // Add the base margin term to last column dh::LaunchN( generic_param_->gpu_id, p_fmat->Info().num_row_ * model.learner_model_param->num_output_group, [=] __device__(size_t idx) { d_phis[(idx + 1) * contributions_columns - 1] = margin.empty() ? 
base_score : margin[idx]; }); const auto& paths = this->ExtractPaths(model, real_ntree_limit); for (auto& batch : p_fmat->GetBatches<SparsePage>()) { batch.data.SetDevice(generic_param_->gpu_id); batch.offset.SetDevice(generic_param_->gpu_id); SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), model.learner_model_param->num_feature); gpu_treeshap::GPUTreeShap( X, paths, ngroup, phis.data().get() + batch.base_rowid * contributions_columns); } dh::safe_cuda(cudaMemcpyAsync(contribs.data(), phis.data().get(), sizeof(float) * phis.size(), cudaMemcpyDefault)); } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::vector<gpu_treeshap::PathElement> ExtractPaths( const gbm::GBTreeModel& model, size_t tree_limit) { std::vector<gpu_treeshap::PathElement> paths; size_t path_idx = 0; CHECK_LE(tree_limit, model.trees.size()); for (auto i = 0ull; i < tree_limit; i++) { const auto& tree = *model.trees.at(i); size_t group = model.tree_info[i]; const auto& nodes = tree.GetNodes(); for (auto j = 0ull; j < nodes.size(); j++) { if (nodes[j].IsLeaf() && !nodes[j].IsDeleted()) { auto child = nodes[j]; float v = child.LeafValue(); size_t child_idx = j; const float inf = std::numeric_limits<float>::infinity(); while (!child.IsRoot()) { float child_cover = tree.Stat(child_idx).sum_hess; float parent_cover = tree.Stat(child.Parent()).sum_hess; float zero_fraction = child_cover / parent_cover; CHECK(zero_fraction >= 0.0 && zero_fraction <= 1.0); auto parent = nodes[child.Parent()]; CHECK(parent.LeftChild() == child_idx || parent.RightChild() == child_idx); bool is_left_path = parent.LeftChild() == child_idx; bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) || (parent.DefaultLeft() && is_left_path); float lower_bound = is_left_path ? -inf : parent.SplitCond(); float upper_bound = is_left_path ? 
parent.SplitCond() : inf; paths.emplace_back(path_idx, parent.SplitIndex(), group, lower_bound, upper_bound, is_missing_path, zero_fraction, v); child_idx = child.Parent(); child = parent; } // Root node has feature -1 paths.emplace_back(path_idx, -1, group, -inf, inf, false, 1.0, v); path_idx++; } } } return paths; } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
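SparsePageView::GetElement in both files of this record resolves a (row, feature) pair by binary-searching the row's CSR slice, with a direct index for dense rows and NaN for missing entries. The sketch below is a minimal, self-contained CUDA illustration of that lookup only, assuming a hypothetical Entry struct and test data; it uses a plain half-open binary search rather than the iterator-with-previous_middle loop used above.

#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

struct Entry { int index; float fvalue; };

// Binary search for feature `fidx` inside one CSR row; returns NaN when missing.
__device__ float RowGetElement(const Entry* data, int begin, int end, int fidx) {
  while (begin < end) {
    int mid = begin + (end - begin) / 2;
    if (data[mid].index == fidx) return data[mid].fvalue;
    if (data[mid].index < fidx) begin = mid + 1; else end = mid;
  }
  return nanf("");
}

__global__ void LookupKernel(const Entry* data, const int* row_ptr, int num_rows,
                             int fidx, float* out) {
  int ridx = blockIdx.x * blockDim.x + threadIdx.x;
  if (ridx >= num_rows) return;
  out[ridx] = RowGetElement(data, row_ptr[ridx], row_ptr[ridx + 1], fidx);
}

int main() {
  // Two rows: row 0 stores features {0: 1.5, 3: 2.0}, row 1 stores {1: 4.0}.
  Entry h_data[] = {{0, 1.5f}, {3, 2.0f}, {1, 4.0f}};
  int h_row_ptr[] = {0, 2, 3};
  Entry* d_data; int* d_row_ptr; float* d_out;
  cudaMalloc(&d_data, sizeof(h_data));
  cudaMalloc(&d_row_ptr, sizeof(h_row_ptr));
  cudaMalloc(&d_out, 2 * sizeof(float));
  cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);
  cudaMemcpy(d_row_ptr, h_row_ptr, sizeof(h_row_ptr), cudaMemcpyHostToDevice);
  LookupKernel<<<1, 32>>>(d_data, d_row_ptr, 2, /*fidx=*/3, d_out);
  float h_out[2];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  printf("row0 f3 = %f, row1 f3 = %f\n", h_out[0], h_out[1]);  // 2.0 and nan
  cudaFree(d_data); cudaFree(d_row_ptr); cudaFree(d_out);
  return 0;
}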
216439de5b2fd9016e7bcf7771adc98ee1944f15.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuda_conv2D_ff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            double *pA = NULL;
            hipMalloc(&pA, XSIZE*YSIZE);
            double *pNet = NULL;
            hipMalloc(&pNet, XSIZE*YSIZE);
            const double *in = NULL;
            hipMalloc(&in, XSIZE*YSIZE);
            const double *pKernels = NULL;
            hipMalloc(&pKernels, XSIZE*YSIZE);
            const double *pBias = NULL;
            hipMalloc(&pBias, XSIZE*YSIZE);
            size_t kernelCount = 1;
            size_t kernelRows = 1;
            size_t kernelCols = 1;
            size_t outputRows = 1;
            size_t outputCols = 1;
            size_t inputRows = 1;
            size_t inputCols = 1;
            size_t inputChannels = 1;
            size_t padding = 1;
            size_t stride = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( cuda_conv2D_ff), dim3(gridBlock),dim3(threadBlock), 0, 0, pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( cuda_conv2D_ff), dim3(gridBlock),dim3(threadBlock), 0, 0, pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( cuda_conv2D_ff), dim3(gridBlock),dim3(threadBlock), 0, 0, pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
216439de5b2fd9016e7bcf7771adc98ee1944f15.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuda_conv2D_ff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            double *pA = NULL;
            cudaMalloc(&pA, XSIZE*YSIZE);
            double *pNet = NULL;
            cudaMalloc(&pNet, XSIZE*YSIZE);
            const double *in = NULL;
            cudaMalloc(&in, XSIZE*YSIZE);
            const double *pKernels = NULL;
            cudaMalloc(&pKernels, XSIZE*YSIZE);
            const double *pBias = NULL;
            cudaMalloc(&pBias, XSIZE*YSIZE);
            size_t kernelCount = 1;
            size_t kernelRows = 1;
            size_t kernelCols = 1;
            size_t outputRows = 1;
            size_t outputCols = 1;
            size_t inputRows = 1;
            size_t inputCols = 1;
            size_t inputChannels = 1;
            size_t padding = 1;
            size_t stride = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cuda_conv2D_ff<<<gridBlock,threadBlock>>>(pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cuda_conv2D_ff<<<gridBlock,threadBlock>>>(pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cuda_conv2D_ff<<<gridBlock,threadBlock>>>(pA,pNet,in,pKernels,pBias,kernelCount,kernelRows,kernelCols,outputRows,outputCols,inputRows,inputCols,inputChannels,padding,stride);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
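The harness above launches the kernel 1000 times between two std::chrono::steady_clock samples but does not synchronize again before reading the end time, so the host-side figure mostly reflects launch/enqueue cost rather than device execution time. As a hedged alternative (BusyKernel, the sizes, and the iteration count below are made up for illustration, not part of the record), CUDA events can bracket the loop and the stop event is synchronized before the elapsed time is read:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void BusyKernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // Warm up once so one-time launch overhead is not measured.
  BusyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaDeviceSynchronize();

  cudaEventRecord(start);
  for (int iter = 0; iter < 1000; ++iter) {
    BusyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // wait until all 1000 launches have finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  // total ms -> microseconds, averaged over the 1000 timed launches
  printf("avg kernel time: %f us\n", (ms * 1000.0f) / 1000.0f);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}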
859a2bf2df10a3769f9276be155b5e3d59881120.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <mutex> #include <type_traits> #include <rocblas.h> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include <cuda_fp16.hpp> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/backend.h" #include "chainerx/backend_util.h" #include "chainerx/cuda/rocblas.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/cusolver.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/float16.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/error.h" #include "chainerx/float16.h" #include "chainerx/kernels/creation.h" #include "chainerx/kernels/linalg.h" #include "chainerx/kernels/misc.h" #include "chainerx/macro.h" #include "chainerx/native/native_device.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/linalg.h" namespace chainerx { namespace cuda { namespace { template <typename T> cusolverStatus_t GetrfBuffersize(hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"}; } template <typename T> cusolverStatus_t Getrf( hipsolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, T* /*workspace*/, int* /*devipiv*/, int* /*devinfo*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"}; } template <typename T> cusolverStatus_t Getrs( hipsolverDnHandle_t /*handle*/, hipblasOperation_t /*trans*/, int /*n*/, int /*nrhs*/, T* /*a*/, int /*lda*/, int* /*devipiv*/, T* /*b*/, int /*ldb*/, int* /*devinfo*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrs (Solve)"}; } template <> cusolverStatus_t GetrfBuffersize<double>(hipsolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) { return hipsolverDnDgetrf_bufferSize(handle, m, n, a, lda, lwork); } template <> cusolverStatus_t GetrfBuffersize<float>(hipsolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) { return hipsolverDnSgetrf_bufferSize(handle, m, n, a, lda, lwork); } template <> cusolverStatus_t Getrf<double>(hipsolverDnHandle_t handle, int m, int n, double* a, int lda, double* workspace, int* devipiv, int* devinfo) { return hipsolverDnDgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo); } template <> cusolverStatus_t Getrf<float>(hipsolverDnHandle_t handle, int m, int n, float* a, int lda, float* workspace, int* devipiv, int* devinfo) { return hipsolverDnSgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo); } template <> cusolverStatus_t Getrs<double>( hipsolverDnHandle_t handle, hipblasOperation_t trans, int n, int nrhs, double* a, int lda, int* devipiv, double* b, int ldb, int* devinfo) { return hipsolverDnDgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo); } template <> cusolverStatus_t Getrs<float>( hipsolverDnHandle_t handle, hipblasOperation_t trans, int n, int nrhs, float* a, int lda, int* devipiv, float* b, int ldb, int* devinfo) { return hipsolverDnSgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo); } template <typename T> void SolveImpl(const Array& a, const Array& b, const Array& out) { Device& device = a.device(); Dtype dtype = a.dtype(); cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device)); Array 
lu_matrix = Empty(a.shape(), dtype, device); device.backend().CallKernel<CopyKernel>(a.Transpose(), lu_matrix); auto lu_ptr = static_cast<T*>(internal::GetRawOffsetData(lu_matrix)); int64_t m = a.shape()[0]; int64_t nrhs = 1; if (b.ndim() == 2) { nrhs = b.shape()[1]; } Array ipiv = Empty(Shape{m}, Dtype::kInt32, device); auto ipiv_ptr = static_cast<int*>(internal::GetRawOffsetData(ipiv)); int buffersize = 0; device_internals.cusolverdn_handle().Call(GetrfBuffersize<T>, m, m, lu_ptr, m, &buffersize); Array work = Empty(Shape{buffersize}, dtype, device); auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work)); std::shared_ptr<void> devinfo = device.Allocate(sizeof(int)); device_internals.cusolverdn_handle().Call(Getrf<T>, m, m, lu_ptr, m, work_ptr, ipiv_ptr, static_cast<int*>(devinfo.get())); int devinfo_h = 0; Device& native_device = GetDefaultContext().GetDevice({"native", 0}); device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device); if (devinfo_h != 0) { throw ChainerxError{"Unsuccessful getrf (LU) execution. Info = ", devinfo_h}; } Array out_transposed = b.Transpose().Copy(); auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out_transposed)); device_internals.cusolverdn_handle().Call( Getrs<T>, HIPBLAS_OP_N, m, nrhs, lu_ptr, m, ipiv_ptr, out_ptr, m, static_cast<int*>(devinfo.get())); device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device); if (devinfo_h != 0) { throw ChainerxError{"Unsuccessful getrs (Solve) execution. Info = ", devinfo_h}; } device.backend().CallKernel<CopyKernel>(out_transposed.Transpose(), out); } } // namespace class CudaSolveKernel : public SolveKernel { public: void Call(const Array& a, const Array& b, const Array& out) override { Device& device = a.device(); Dtype dtype = a.dtype(); CudaSetDeviceScope scope{device.index()}; CHAINERX_ASSERT(a.ndim() == 2); CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]); VisitFloatingPointDtype(dtype, [&](auto pt) { using T = typename decltype(pt)::type; SolveImpl<T>(a, b, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(SolveKernel, CudaSolveKernel); class CudaInverseKernel : public InverseKernel { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); Dtype dtype = a.dtype(); CudaSetDeviceScope scope{device.index()}; CHAINERX_ASSERT(a.ndim() == 2); CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]); // There is LAPACK routine ``getri`` for computing the inverse of an LU-factored matrix, // but cuSOLVER does not have it implemented, therefore inverse is obtained with ``getrs`` // inv(A) == solve(A, Identity) Array b = Identity(a.shape()[0], dtype, device); device.backend().CallKernel<SolveKernel>(a, b, out); } }; CHAINERX_CUDA_REGISTER_KERNEL(InverseKernel, CudaInverseKernel); } // namespace cuda } // namespace chainerx
859a2bf2df10a3769f9276be155b5e3d59881120.cu
#include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <mutex> #include <type_traits> #include <cublas_v2.h> #include <cuda_runtime.h> #include <cusolverDn.h> #include <cuda_fp16.hpp> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/backend.h" #include "chainerx/backend_util.h" #include "chainerx/cuda/cublas.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/cusolver.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/float16.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/error.h" #include "chainerx/float16.h" #include "chainerx/kernels/creation.h" #include "chainerx/kernels/linalg.h" #include "chainerx/kernels/misc.h" #include "chainerx/macro.h" #include "chainerx/native/native_device.h" #include "chainerx/routines/creation.h" #include "chainerx/routines/linalg.h" namespace chainerx { namespace cuda { namespace { template <typename T> cusolverStatus_t GetrfBuffersize(cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, int* /*lwork*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"}; } template <typename T> cusolverStatus_t Getrf( cusolverDnHandle_t /*handle*/, int /*m*/, int /*n*/, T* /*a*/, int /*lda*/, T* /*workspace*/, int* /*devipiv*/, int* /*devinfo*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrf (LU)"}; } template <typename T> cusolverStatus_t Getrs( cusolverDnHandle_t /*handle*/, cublasOperation_t /*trans*/, int /*n*/, int /*nrhs*/, T* /*a*/, int /*lda*/, int* /*devipiv*/, T* /*b*/, int /*ldb*/, int* /*devinfo*/) { throw DtypeError{"Only Arrays of float or double type are supported by getrs (Solve)"}; } template <> cusolverStatus_t GetrfBuffersize<double>(cusolverDnHandle_t handle, int m, int n, double* a, int lda, int* lwork) { return cusolverDnDgetrf_bufferSize(handle, m, n, a, lda, lwork); } template <> cusolverStatus_t GetrfBuffersize<float>(cusolverDnHandle_t handle, int m, int n, float* a, int lda, int* lwork) { return cusolverDnSgetrf_bufferSize(handle, m, n, a, lda, lwork); } template <> cusolverStatus_t Getrf<double>(cusolverDnHandle_t handle, int m, int n, double* a, int lda, double* workspace, int* devipiv, int* devinfo) { return cusolverDnDgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo); } template <> cusolverStatus_t Getrf<float>(cusolverDnHandle_t handle, int m, int n, float* a, int lda, float* workspace, int* devipiv, int* devinfo) { return cusolverDnSgetrf(handle, m, n, a, lda, workspace, devipiv, devinfo); } template <> cusolverStatus_t Getrs<double>( cusolverDnHandle_t handle, cublasOperation_t trans, int n, int nrhs, double* a, int lda, int* devipiv, double* b, int ldb, int* devinfo) { return cusolverDnDgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo); } template <> cusolverStatus_t Getrs<float>( cusolverDnHandle_t handle, cublasOperation_t trans, int n, int nrhs, float* a, int lda, int* devipiv, float* b, int ldb, int* devinfo) { return cusolverDnSgetrs(handle, trans, n, nrhs, a, lda, devipiv, b, ldb, devinfo); } template <typename T> void SolveImpl(const Array& a, const Array& b, const Array& out) { Device& device = a.device(); Dtype dtype = a.dtype(); cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device)); Array lu_matrix = Empty(a.shape(), dtype, device); 
device.backend().CallKernel<CopyKernel>(a.Transpose(), lu_matrix); auto lu_ptr = static_cast<T*>(internal::GetRawOffsetData(lu_matrix)); int64_t m = a.shape()[0]; int64_t nrhs = 1; if (b.ndim() == 2) { nrhs = b.shape()[1]; } Array ipiv = Empty(Shape{m}, Dtype::kInt32, device); auto ipiv_ptr = static_cast<int*>(internal::GetRawOffsetData(ipiv)); int buffersize = 0; device_internals.cusolverdn_handle().Call(GetrfBuffersize<T>, m, m, lu_ptr, m, &buffersize); Array work = Empty(Shape{buffersize}, dtype, device); auto work_ptr = static_cast<T*>(internal::GetRawOffsetData(work)); std::shared_ptr<void> devinfo = device.Allocate(sizeof(int)); device_internals.cusolverdn_handle().Call(Getrf<T>, m, m, lu_ptr, m, work_ptr, ipiv_ptr, static_cast<int*>(devinfo.get())); int devinfo_h = 0; Device& native_device = GetDefaultContext().GetDevice({"native", 0}); device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device); if (devinfo_h != 0) { throw ChainerxError{"Unsuccessful getrf (LU) execution. Info = ", devinfo_h}; } Array out_transposed = b.Transpose().Copy(); auto out_ptr = static_cast<T*>(internal::GetRawOffsetData(out_transposed)); device_internals.cusolverdn_handle().Call( Getrs<T>, CUBLAS_OP_N, m, nrhs, lu_ptr, m, ipiv_ptr, out_ptr, m, static_cast<int*>(devinfo.get())); device.MemoryCopyTo(&devinfo_h, devinfo.get(), sizeof(int), native_device); if (devinfo_h != 0) { throw ChainerxError{"Unsuccessful getrs (Solve) execution. Info = ", devinfo_h}; } device.backend().CallKernel<CopyKernel>(out_transposed.Transpose(), out); } } // namespace class CudaSolveKernel : public SolveKernel { public: void Call(const Array& a, const Array& b, const Array& out) override { Device& device = a.device(); Dtype dtype = a.dtype(); CudaSetDeviceScope scope{device.index()}; CHAINERX_ASSERT(a.ndim() == 2); CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]); VisitFloatingPointDtype(dtype, [&](auto pt) { using T = typename decltype(pt)::type; SolveImpl<T>(a, b, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(SolveKernel, CudaSolveKernel); class CudaInverseKernel : public InverseKernel { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); Dtype dtype = a.dtype(); CudaSetDeviceScope scope{device.index()}; CHAINERX_ASSERT(a.ndim() == 2); CHAINERX_ASSERT(a.shape()[0] == a.shape()[1]); // There is LAPACK routine ``getri`` for computing the inverse of an LU-factored matrix, // but cuSOLVER does not have it implemented, therefore inverse is obtained with ``getrs`` // inv(A) == solve(A, Identity) Array b = Identity(a.shape()[0], dtype, device); device.backend().CallKernel<SolveKernel>(a, b, out); } }; CHAINERX_CUDA_REGISTER_KERNEL(InverseKernel, CudaInverseKernel); } // namespace cuda } // namespace chainerx
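Stripped of the ChainerX plumbing, SolveImpl in this pair is the standard cuSOLVER sequence: query the getrf workspace size, LU-factorize in place, then call getrs on the right-hand side, with inputs transposed because cuSOLVER expects column-major storage. The bare-bones host program below sketches that sequence on a hypothetical 2x2 system, with error checking omitted; it would be linked with -lcusolver.

#include <cstdio>
#include <cuda_runtime.h>
#include <cusolverDn.h>

int main() {
  // Solve A x = b; A and b are stored column-major, which is the reason the
  // ChainerX code above copies a.Transpose() before calling getrf.
  // A = [[3, 1], [1, 2]], b = [9, 8]  ->  x = [2, 3]
  const int n = 2, nrhs = 1;
  double h_A[] = {3.0, 1.0, 1.0, 2.0};  // column-major
  double h_b[] = {9.0, 8.0};

  double *d_A, *d_b, *d_work;
  int *d_ipiv, *d_info;
  cudaMalloc(&d_A, sizeof(h_A));
  cudaMalloc(&d_b, sizeof(h_b));
  cudaMalloc(&d_ipiv, n * sizeof(int));
  cudaMalloc(&d_info, sizeof(int));
  cudaMemcpy(d_A, h_A, sizeof(h_A), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, sizeof(h_b), cudaMemcpyHostToDevice);

  cusolverDnHandle_t handle;
  cusolverDnCreate(&handle);

  int lwork = 0;
  cusolverDnDgetrf_bufferSize(handle, n, n, d_A, n, &lwork);
  cudaMalloc(&d_work, lwork * sizeof(double));

  // LU factorization followed by the triangular solve, as in SolveImpl above.
  cusolverDnDgetrf(handle, n, n, d_A, n, d_work, d_ipiv, d_info);
  cusolverDnDgetrs(handle, CUBLAS_OP_N, n, nrhs, d_A, n, d_ipiv, d_b, n, d_info);

  cudaMemcpy(h_b, d_b, sizeof(h_b), cudaMemcpyDeviceToHost);
  printf("x = [%f, %f]\n", h_b[0], h_b[1]);  // expected [2, 3]

  cusolverDnDestroy(handle);
  cudaFree(d_A); cudaFree(d_b); cudaFree(d_work);
  cudaFree(d_ipiv); cudaFree(d_info);
  return 0;
}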
b0ce7619ae8b4877c9ae094efa6b9e5c0ba3d319.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* thanks old college classmate Sean Anderson!
 * http://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
 */
#include "checker_helper.cu"
#include <math.h>

// #define BOARDSIZE 6
// #define NUMQUEENS 3

// #define BOARDSIZE 4
// #define NUMQUEENS 2

__global__ void qgdKernel(int n, int a, bool one, bool all, int pitch, unsigned int * d_solutions, unsigned int * count)
{
    // this kernel is completely hardcoded to the 4x4 board
    // I'm not pretending otherwise
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    if ( all == true ){
        // we know that for n=4, a=2, so knock out all boards where a != 2
        int bitcount = countBits(tid);
        if (bitcount != a) {
            return;
        }

        int width = n;
        int numQueens = a;

        // create queens List for n = 4 the size of the proposed solution is 2
        unsigned int queensList[16] = {0};
        int temp = tid;
        for (unsigned int c = 0, qi = 0 ; temp ; temp >>= 1, c++){
            if (temp & 1){
                queensList[qi] = c;
                qi++;
            }
        }

        // unsigned int list[NUMQUEENS] = { 0 , 2 , 3 , 16 , 51};
        // for (int q=0; q< numQueens; q++)
        //     queensList[q] = list[q];

        // if (checkerFunc (queensList, width, numQueens))
        //     // addSolution (queensList, numQueens, d_solutions, count, pitch);
        //     printf("Solution\n");
        // else
        //     printf("This is not a Solution\n");

        if (checkerFunc (queensList, width, numQueens)) {
            addSolution (queensList, numQueens, d_solutions, count, pitch);
        }
    }
}

// Store your solutions in d_solution, which has already been allocated for you
void qgd(int n, int a, bool one, bool all, int pitch, unsigned long long numSolutions, unsigned int * d_solutions, unsigned int * count)
{
    // there are 2^16 possible configurations of queens on a 4x4 chessboard
    // 2^8 blocks of 2^8 threads each will check them all (brute force)
    int width = n;
    int numQueens = a;
    hipLaunchKernelGGL(( qgdKernel), dim3(1<<8), dim3(1<<8), 0, 0, width, numQueens, one, all, pitch, d_solutions, count);
    // qgdKernel<<< 1<<10, 1<<6 >>>(n, a, one, all, pitch, d_solutions, count);
}
b0ce7619ae8b4877c9ae094efa6b9e5c0ba3d319.cu
/* thanks old college classmate Sean Anderson!
 * http://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
 */
#include "checker_helper.cu"
#include <math.h>

// #define BOARDSIZE 6
// #define NUMQUEENS 3

// #define BOARDSIZE 4
// #define NUMQUEENS 2

__global__ void qgdKernel(int n, int a, bool one, bool all, int pitch, unsigned int * d_solutions, unsigned int * count)
{
    // this kernel is completely hardcoded to the 4x4 board
    // I'm not pretending otherwise
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    if ( all == true ){
        // we know that for n=4, a=2, so knock out all boards where a != 2
        int bitcount = countBits(tid);
        if (bitcount != a) {
            return;
        }

        int width = n;
        int numQueens = a;

        // create queens List for n = 4 the size of the proposed solution is 2
        unsigned int queensList[16] = {0};
        int temp = tid;
        for (unsigned int c = 0, qi = 0 ; temp ; temp >>= 1, c++){
            if (temp & 1){
                queensList[qi] = c;
                qi++;
            }
        }

        // unsigned int list[NUMQUEENS] = { 0 , 2 , 3 , 16 , 51};
        // for (int q=0; q< numQueens; q++)
        //     queensList[q] = list[q];

        // if (checkerFunc (queensList, width, numQueens))
        //     // addSolution (queensList, numQueens, d_solutions, count, pitch);
        //     printf("Solution\n");
        // else
        //     printf("This is not a Solution\n");

        if (checkerFunc (queensList, width, numQueens)) {
            addSolution (queensList, numQueens, d_solutions, count, pitch);
        }
    }
}

// Store your solutions in d_solution, which has already been allocated for you
void qgd(int n, int a, bool one, bool all, int pitch, unsigned long long numSolutions, unsigned int * d_solutions, unsigned int * count)
{
    // there are 2^16 possible configurations of queens on a 4x4 chessboard
    // 2^8 blocks of 2^8 threads each will check them all (brute force)
    int width = n;
    int numQueens = a;
    qgdKernel<<< 1<<8, 1<<8 >>>(width, numQueens, one, all, pitch, d_solutions, count);
    // qgdKernel<<< 1<<10, 1<<6 >>>(n, a, one, all, pitch, d_solutions, count);
}
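Both files in this record enumerate every board mask with one thread per thread id, filter by the number of set bits, and unpack the surviving masks into a list of occupied squares. The sketch below shows just that bit-manipulation step, using the __popc intrinsic in place of the record's countBits helper; the kernel name and the printed example board are my own, and the checkerFunc/addSolution stages are omitted.

#include <cstdio>
#include <cuda_runtime.h>

// Each thread interprets its id as a bitmask of occupied squares and unpacks
// the set-bit positions into a per-thread list, as qgdKernel does above.
__global__ void UnpackBoards(int num_queens) {
  unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (__popc(tid) != num_queens) return;  // hardware popcount instead of countBits

  unsigned int queens[16] = {0};
  unsigned int temp = tid;
  for (unsigned int square = 0, qi = 0; temp; temp >>= 1, ++square) {
    if (temp & 1u) {
      queens[qi] = square;
      ++qi;
    }
  }
  if (tid == 5u) {  // 0b0101: queens on squares 0 and 2; print one board only
    printf("board 0x%x -> squares %u and %u\n", tid, queens[0], queens[1]);
  }
}

int main() {
  // Same launch shape as the record: 2^8 blocks of 2^8 threads cover all 2^16 masks.
  UnpackBoards<<<1 << 8, 1 << 8>>>(2);
  cudaDeviceSynchronize();
  return 0;
}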
01d832aef2a2af52ccc458dcae45d6045377700a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * * Created on: 27-10-2015 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/calcGridHash.cuh" #include "../../methods/calcGridPos.cuh" #include "../../methods/calcRelPosVelNoSlip.cuh" __device__ static real interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par) { real r = sqrt(pow2(dpos.x) + pow2(dpos.y)); real q = r * par->I_H; if (q < 2.0) { real k = kern(q, par->I_H); return p[j].m * k / p[j].di; } else { return 0.0; } } __global__ void calcFluidVolumeFractionWSDP(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Particle *pPDPF, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y); int2 gridPos = calcGridPos(pos, par); uint gridHash0 = calcGridHash(gridPos, par); real result = 0.0; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int2 gridPos2; gridPos2.x = gridPos.x + x; gridPos2.y = gridPos.y + y; if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1) || (gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); if (((gridPos.x == 0) && (gridPos2.x == 0)) || ((gridPos.x == par->NXC - 1) && (gridPos2.x == par->NXC - 1)) || ((gridPos.y == 0) && (gridPos2.y == 0)) || ((gridPos.y == par->NYC - 1) && (gridPos2.y == par->NYC - 1))) { if (par->T_BOUNDARY_PERIODICITY != 1) { if (gridPos.y == gridPos2.y) { if (gridPos.y == par->NYC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos.y == 0) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } if (par->T_BOUNDARY_PERIODICITY == 0) { if (gridPos.x == gridPos2.x) { if (gridPos2.x == 0) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 4, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos2.x == par->NXC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 2, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } if ((gridPos.x == gridPos2.x) && (gridPos.y == gridPos.y)) { if ((gridPos.x == 0) && (gridPos.y == 0)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 7, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == par->NXC - 1) && (gridPos.y == 0)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 6, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == 0) && (gridPos.y == par->NYC - 1)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 8, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 5, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } } } } } if ((par->T_BOUNDARY_PERIODICITY > 0) && 
((gridPos.x == 0) || (gridPos.x == par->NXC - 1))) { for (int y = -1; y <= 1; y++) { int2 gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; if (gridPos.x == par->NXC - 1) gridPos2.x = 0; gridPos2.y = gridPos.y + y; if ((gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; if (gridPos.x == par->NXC - 1) pos1.x -= par->XCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if ((par->T_BOUNDARY_PERIODICITY == 1) && ((gridPos.y == 0) || (gridPos.y == par->NYC - 1))) { for (int x = -1; x <= 1; x++) { int2 gridPos2; if (gridPos.y == 0) gridPos2.y = par->NYC - 1; if (gridPos.y == par->NYC - 1) gridPos2.y = 0; gridPos2.x = gridPos.x + x; if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.y == 0) pos1.y += par->YCV; if (gridPos.y == par->NYC - 1) pos1.y -= par->YCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if (par->T_BOUNDARY_PERIODICITY == 1) { if (((gridPos.x == 0) && (gridPos.y == 0)) || (gridPos.x == 0) && (gridPos.y == par->NYC - 1) || (gridPos.x == par->NXC - 1) && (gridPos.y == 0) || (gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { int2 gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; else gridPos2.x = 0; if (gridPos.y == 0) gridPos2.y = par->NYC - 1; else gridPos2.y = 0; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; else pos1.x -= par->XCV; if (gridPos.y == 0) pos1.y += par->YCV; else pos1.y -= par->YCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if (par->T_BOUNDARY_PERIODICITY == 2) { if (((gridPos.x == 0) && (gridPos.y == 0)) || (gridPos.x == 0) && (gridPos.y == par->NYC - 1) || (gridPos.x == par->NXC - 1) && (gridPos.y == 0) || (gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { int2 gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; else gridPos2.x = 0; gridPos2.y = gridPos.y; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; else pos1.x -= par->XCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; if (gridPos.y == par->NYC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par); result += 
interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos.y == 0) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } } p[index].o = 1.0 - result; p[index].d = p[index].o * p[index].di; p[index].m = par->XCV * par->YCV * p[index].d / (par->NX * par->NY); p[index].d = p[index].d * pow(1.0 + 1000.0*fabs(par->G_Y)*(0.8 - p[index].pos.y) / p[index].b, 1.0 / 7.0); } }
01d832aef2a2af52ccc458dcae45d6045377700a.cu
/* * * * Created on: 27-10-2015 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/calcGridHash.cuh" #include "../../methods/calcGridPos.cuh" #include "../../methods/calcRelPosVelNoSlip.cuh" __device__ static real interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par) { real r = sqrt(pow2(dpos.x) + pow2(dpos.y)); real q = r * par->I_H; if (q < 2.0) { real k = kern(q, par->I_H); return p[j].m * k / p[j].di; } else { return 0.0; } } __global__ void calcFluidVolumeFractionWSDP(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Particle *pPDPF, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y); int2 gridPos = calcGridPos(pos, par); uint gridHash0 = calcGridHash(gridPos, par); real result = 0.0; for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int2 gridPos2; gridPos2.x = gridPos.x + x; gridPos2.y = gridPos.y + y; if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1) || (gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); if (((gridPos.x == 0) && (gridPos2.x == 0)) || ((gridPos.x == par->NXC - 1) && (gridPos2.x == par->NXC - 1)) || ((gridPos.y == 0) && (gridPos2.y == 0)) || ((gridPos.y == par->NYC - 1) && (gridPos2.y == par->NYC - 1))) { if (par->T_BOUNDARY_PERIODICITY != 1) { if (gridPos.y == gridPos2.y) { if (gridPos.y == par->NYC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos.y == 0) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } if (par->T_BOUNDARY_PERIODICITY == 0) { if (gridPos.x == gridPos2.x) { if (gridPos2.x == 0) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 4, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos2.x == par->NXC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 2, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } if ((gridPos.x == gridPos2.x) && (gridPos.y == gridPos.y)) { if ((gridPos.x == 0) && (gridPos.y == 0)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 7, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == par->NXC - 1) && (gridPos.y == 0)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 6, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == 0) && (gridPos.y == par->NYC - 1)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 8, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 5, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } } } } } if ((par->T_BOUNDARY_PERIODICITY > 0) && ((gridPos.x == 0) || (gridPos.x == par->NXC - 1))) { for (int y = -1; y <= 1; y++) { int2 
gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; if (gridPos.x == par->NXC - 1) gridPos2.x = 0; gridPos2.y = gridPos.y + y; if ((gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; if (gridPos.x == par->NXC - 1) pos1.x -= par->XCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if ((par->T_BOUNDARY_PERIODICITY == 1) && ((gridPos.y == 0) || (gridPos.y == par->NYC - 1))) { for (int x = -1; x <= 1; x++) { int2 gridPos2; if (gridPos.y == 0) gridPos2.y = par->NYC - 1; if (gridPos.y == par->NYC - 1) gridPos2.y = 0; gridPos2.x = gridPos.x + x; if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1)) continue; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.y == 0) pos1.y += par->YCV; if (gridPos.y == par->NYC - 1) pos1.y -= par->YCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if (par->T_BOUNDARY_PERIODICITY == 1) { if (((gridPos.x == 0) && (gridPos.y == 0)) || (gridPos.x == 0) && (gridPos.y == par->NYC - 1) || (gridPos.x == par->NXC - 1) && (gridPos.y == 0) || (gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { int2 gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; else gridPos2.x = 0; if (gridPos.y == 0) gridPos2.y = par->NYC - 1; else gridPos2.y = 0; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; else pos1.x -= par->XCV; if (gridPos.y == 0) pos1.y += par->YCV; else pos1.y -= par->YCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } if (par->T_BOUNDARY_PERIODICITY == 2) { if (((gridPos.x == 0) && (gridPos.y == 0)) || (gridPos.x == 0) && (gridPos.y == par->NYC - 1) || (gridPos.x == par->NXC - 1) && (gridPos.y == 0) || (gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)) { int2 gridPos2; if (gridPos.x == 0) gridPos2.x = par->NXC - 1; else gridPos2.x = 0; gridPos2.y = gridPos.y; uint gridHash = calcGridHash(gridPos2, par); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { real2 dpos, dvel; real2 pos1 = p[index].pos; if (gridPos.x == 0) pos1.x += par->XCV; else pos1.x -= par->XCV; real2 pos2 = pPDPF[j].pos; real2 vel1 = p[index].vel; real2 vel2 = pPDPF[j].vel; if (gridPos.y == par->NYC - 1) { calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } if (gridPos.y == 0) { 
calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par); result += interaction(index, j, dpos, dvel, pPDPF, par); } } } } } p[index].o = 1.0 - result; p[index].d = p[index].o * p[index].di; p[index].m = par->XCV * par->YCV * p[index].d / (par->NX * par->NY); p[index].d = p[index].d * pow(1.0 + 1000.0*fabs(par->G_Y)*(0.8 - p[index].pos.y) / p[index].b, 1.0 / 7.0); } }
00d43f861c0198a608febee4bb9fb4de91f79642.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "odd.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            unsigned *v = NULL;
            hipMalloc(&v, XSIZE*YSIZE);
            unsigned base = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((odd), dim3(gridBlock), dim3(threadBlock), 0, 0, v, base);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((odd), dim3(gridBlock), dim3(threadBlock), 0, 0, v, base);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((odd), dim3(gridBlock), dim3(threadBlock), 0, 0, v, base);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
00d43f861c0198a608febee4bb9fb4de91f79642.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "odd.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            unsigned *v = NULL;
            cudaMalloc(&v, XSIZE*YSIZE);
            unsigned base = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            odd<<<gridBlock, threadBlock>>>(v, base);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                odd<<<gridBlock, threadBlock>>>(v, base);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                odd<<<gridBlock, threadBlock>>>(v, base);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
78504795e506edecf8ab2611ebbf15aa131f9842.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #define PI 3.1415926 namespace caffe { __device__ double squareD(double x) { return x * x; } __device__ float squareD(float x) { return x * x; } template <typename Dtype> __global__ void ComputeN(const int nthreads, const Dtype* mean, const Dtype* variance, const Dtype* correlation, const Dtype* gt, const unsigned int num_mixtures, Dtype* mixture_prob) { CUDA_KERNEL_LOOP(index, nthreads) { int sample_index = index / num_mixtures; const Dtype* mean_cur = mean + index * 2; const Dtype* variance_cur = variance + index * 2; const Dtype* correlation_cur = correlation + index; const Dtype* gt_cur = gt + sample_index * 2; Dtype* mixture_prob_cur = mixture_prob + index; Dtype normalize = Dtype(1) / (Dtype(2) * Dtype(PI) * variance_cur[0] * variance_cur[1] * sqrt(Dtype(1) - squareD(correlation_cur[0]))); Dtype Z = squareD((gt_cur[0] - mean_cur[0]) / variance_cur[0]) + squareD((gt_cur[1] - mean_cur[1]) / variance_cur[1]) - Dtype(2) * correlation_cur[0] / variance_cur[0] / variance_cur[1] * (gt_cur[0] - mean_cur[0]) * (gt_cur[1] - mean_cur[1]); mixture_prob_cur[0] = normalize * exp(-Z/Dtype(2)/(Dtype(1) - squareD(correlation_cur[0]))); } } template <typename Dtype> __global__ void ComputeP(const int nthreads, const Dtype* mixture_prob, const Dtype* alpha, const unsigned int num_mixtures, const Dtype base, Dtype* prob) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* mixture_prob_cur = mixture_prob + index * num_mixtures; const Dtype* alpha_cur = alpha + index * num_mixtures; Dtype* prob_cur = prob + index; *prob_cur = Dtype(base); for(int i = 0; i < num_mixtures; i++) { *prob_cur += alpha_cur[i] * mixture_prob_cur[i]; } } } template <typename Dtype> __global__ void ComputeLogLike(const unsigned int nthreads, const Dtype* prob, Dtype* log_like) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* prob_cur = prob + index; Dtype* log_like_cur = log_like + index; *log_like_cur = -log(*prob_cur); } } template <typename Dtype> void GMMLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); const Dtype* alpha = bottom[0]->gpu_data(); const Dtype* mean = bottom[1]->gpu_data(); const Dtype* variance = bottom[2]->gpu_data(); const Dtype* correlation = bottom[3]->gpu_data(); const Dtype* label = bottom[4]->gpu_data(); Dtype* mixture_prob = N_.mutable_gpu_data(); Dtype* prob = P_.mutable_gpu_data(); Dtype* log_like = P_.mutable_gpu_diff(); // store the log likelihood in P_.diff hipLaunchKernelGGL(( ComputeN<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mean, variance, correlation, label, num_mixtures_, mixture_prob); count /= num_mixtures_; hipLaunchKernelGGL(( ComputeP<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mixture_prob, alpha, num_mixtures_, base_, prob); hipLaunchKernelGGL(( ComputeLogLike<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob, log_like); Dtype loss = Dtype(0); //caffe_gpu_asum(count, log_like, &loss); const Dtype* sum_ones = sum_ones_.gpu_data(); caffe_gpu_dot(P_.count(), log_like, sum_ones, &loss); //top[0]->mutable_cpu_data()[0] = loss / Dtype(num_) / Dtype(time_step_); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void GMMBackward(const 
unsigned int nthreads, const Dtype* alpha, const Dtype* mean, const Dtype* variance, const Dtype* correlation, const Dtype* label, const Dtype* mixture_prob, const Dtype* prob, const unsigned int num_mixtures, const Dtype loss_weight, Dtype* alpha_diff, Dtype* mean_diff, Dtype* variance_diff, Dtype* correlation_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int sample_index = index / num_mixtures; const Dtype* alpha_cur = alpha + index; const Dtype* mean_cur = mean + 2 * index; const Dtype* variance_cur = variance + 2 * index; const Dtype* correlation_cur = correlation + index; const Dtype* label_cur = label + 2 * sample_index; const Dtype* mixture_prob_cur = mixture_prob + index; const Dtype* prob_cur = prob + sample_index; Dtype* alpha_diff_cur = alpha_diff + index; Dtype* mean_diff_cur = mean_diff + 2 * index; Dtype* variance_diff_cur = variance_diff + 2 * index; Dtype* correlation_diff_cur = correlation_diff + index; // alpha_diff alpha_diff_cur[0] = -loss_weight * mixture_prob_cur[0] / prob_cur[0]; //alpha_diff_cur[0] = -2 * mixture_prob_cur[0] / prob_cur[0]; Dtype t1, t2, t3; // mean1_diff t1 = alpha_diff_cur[0] * alpha_cur[0]; t2 = Dtype(1) / variance_cur[0] / (Dtype(1) - squareD(correlation_cur[0])); t3 = (label_cur[0] - mean_cur[0]) / variance_cur[0] - correlation_cur[0] * (label_cur[1] - mean_cur[1]) / variance_cur[1]; mean_diff_cur[0] = t1 * t2 * t3; // mean2_diff t2 = Dtype(1) / variance_cur[1] / (Dtype(1) - squareD(correlation_cur[0])); t3 = (label_cur[1] - mean_cur[1]) / variance_cur[1] - correlation_cur[0] * (label_cur[0] - mean_cur[0]) / variance_cur[0]; mean_diff_cur[1] = t1 * t2 * t3; // variance1_diff t2 = Dtype(1) / variance_cur[0] / (Dtype(1) - squareD(correlation_cur[0])) * (label_cur[0] - mean_cur[0]); t3 = ((label_cur[0] - mean_cur[0]) / variance_cur[0] - correlation_cur[0] * (label_cur[1] - mean_cur[1]) / variance_cur[1]); variance_diff_cur[0] = (t1 * (t2 * t3 - Dtype(1))) / variance_cur[0]; // varaince2_dif t2 = Dtype(1) / variance_cur[1] / (Dtype(1) - squareD(correlation_cur[0])) * (label_cur[1] - mean_cur[1]); t3 = ((label_cur[1] - mean_cur[1]) / variance_cur[1] - correlation_cur[0] * (label_cur[0] - mean_cur[0]) / variance_cur[0]); variance_diff_cur[1] = (t1 * (t2 * t3 - Dtype(1))) / variance_cur[1]; // correlation_diff t2 = (label_cur[0] - mean_cur[0]) * (label_cur[1] - mean_cur[1]) / variance_cur[0] / variance_cur[1]; Dtype Z = squareD((label_cur[0] - mean_cur[0]) / variance_cur[0]) + squareD((label_cur[1] - mean_cur[1]) / variance_cur[1]) - Dtype(2) * correlation_cur[0] / variance_cur[0] / variance_cur[1] * (label_cur[0] - mean_cur[0]) * (label_cur[1] - mean_cur[1]); t3 = correlation_cur[0] * (Dtype(1) - Dtype(1) /(Dtype(1) - squareD(correlation_cur[0])) * Z); correlation_diff_cur[0] = t1 * (t2 + t3) / (Dtype(1) - squareD(correlation_cur[0])); } } template <typename Dtype> void GMMLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = bottom[0]->count(); const Dtype* alpha = bottom[0]->gpu_data(); const Dtype* mean = bottom[1]->gpu_data(); const Dtype* variance = bottom[2]->gpu_data(); const Dtype* correlation = bottom[3]->gpu_data(); const Dtype* label = bottom[4]->gpu_data(); const Dtype* mixture_prob = N_.gpu_data(); const Dtype* prob = P_.gpu_data(); const Dtype loss_weight = top[0]->cpu_diff()[0]; Dtype* alpha_diff = bottom[0]->mutable_gpu_diff(); Dtype* mean_diff = bottom[1]->mutable_gpu_diff(); Dtype* variance_diff = bottom[2]->mutable_gpu_diff(); Dtype* 
correlation_diff = bottom[3]->mutable_gpu_diff(); hipLaunchKernelGGL(( GMMBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, alpha, mean, variance, correlation, label, mixture_prob, prob, num_mixtures_, loss_weight, alpha_diff, mean_diff, variance_diff, correlation_diff); } INSTANTIATE_LAYER_GPU_FUNCS(GMMLossLayer); } // namespace caffe
78504795e506edecf8ab2611ebbf15aa131f9842.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #define PI 3.1415926 namespace caffe { __device__ double squareD(double x) { return x * x; } __device__ float squareD(float x) { return x * x; } template <typename Dtype> __global__ void ComputeN(const int nthreads, const Dtype* mean, const Dtype* variance, const Dtype* correlation, const Dtype* gt, const unsigned int num_mixtures, Dtype* mixture_prob) { CUDA_KERNEL_LOOP(index, nthreads) { int sample_index = index / num_mixtures; const Dtype* mean_cur = mean + index * 2; const Dtype* variance_cur = variance + index * 2; const Dtype* correlation_cur = correlation + index; const Dtype* gt_cur = gt + sample_index * 2; Dtype* mixture_prob_cur = mixture_prob + index; Dtype normalize = Dtype(1) / (Dtype(2) * Dtype(PI) * variance_cur[0] * variance_cur[1] * sqrt(Dtype(1) - squareD(correlation_cur[0]))); Dtype Z = squareD((gt_cur[0] - mean_cur[0]) / variance_cur[0]) + squareD((gt_cur[1] - mean_cur[1]) / variance_cur[1]) - Dtype(2) * correlation_cur[0] / variance_cur[0] / variance_cur[1] * (gt_cur[0] - mean_cur[0]) * (gt_cur[1] - mean_cur[1]); mixture_prob_cur[0] = normalize * exp(-Z/Dtype(2)/(Dtype(1) - squareD(correlation_cur[0]))); } } template <typename Dtype> __global__ void ComputeP(const int nthreads, const Dtype* mixture_prob, const Dtype* alpha, const unsigned int num_mixtures, const Dtype base, Dtype* prob) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* mixture_prob_cur = mixture_prob + index * num_mixtures; const Dtype* alpha_cur = alpha + index * num_mixtures; Dtype* prob_cur = prob + index; *prob_cur = Dtype(base); for(int i = 0; i < num_mixtures; i++) { *prob_cur += alpha_cur[i] * mixture_prob_cur[i]; } } } template <typename Dtype> __global__ void ComputeLogLike(const unsigned int nthreads, const Dtype* prob, Dtype* log_like) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* prob_cur = prob + index; Dtype* log_like_cur = log_like + index; *log_like_cur = -log(*prob_cur); } } template <typename Dtype> void GMMLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); const Dtype* alpha = bottom[0]->gpu_data(); const Dtype* mean = bottom[1]->gpu_data(); const Dtype* variance = bottom[2]->gpu_data(); const Dtype* correlation = bottom[3]->gpu_data(); const Dtype* label = bottom[4]->gpu_data(); Dtype* mixture_prob = N_.mutable_gpu_data(); Dtype* prob = P_.mutable_gpu_data(); Dtype* log_like = P_.mutable_gpu_diff(); // store the log likelihood in P_.diff ComputeN<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mean, variance, correlation, label, num_mixtures_, mixture_prob); count /= num_mixtures_; ComputeP<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mixture_prob, alpha, num_mixtures_, base_, prob); ComputeLogLike<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, prob, log_like); Dtype loss = Dtype(0); //caffe_gpu_asum(count, log_like, &loss); const Dtype* sum_ones = sum_ones_.gpu_data(); caffe_gpu_dot(P_.count(), log_like, sum_ones, &loss); //top[0]->mutable_cpu_data()[0] = loss / Dtype(num_) / Dtype(time_step_); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void GMMBackward(const unsigned int nthreads, const Dtype* alpha, const Dtype* mean, const Dtype* variance, const Dtype* correlation, const Dtype* label, const Dtype* mixture_prob, const Dtype* prob, const unsigned int 
num_mixtures, const Dtype loss_weight, Dtype* alpha_diff, Dtype* mean_diff, Dtype* variance_diff, Dtype* correlation_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int sample_index = index / num_mixtures; const Dtype* alpha_cur = alpha + index; const Dtype* mean_cur = mean + 2 * index; const Dtype* variance_cur = variance + 2 * index; const Dtype* correlation_cur = correlation + index; const Dtype* label_cur = label + 2 * sample_index; const Dtype* mixture_prob_cur = mixture_prob + index; const Dtype* prob_cur = prob + sample_index; Dtype* alpha_diff_cur = alpha_diff + index; Dtype* mean_diff_cur = mean_diff + 2 * index; Dtype* variance_diff_cur = variance_diff + 2 * index; Dtype* correlation_diff_cur = correlation_diff + index; // alpha_diff alpha_diff_cur[0] = -loss_weight * mixture_prob_cur[0] / prob_cur[0]; //alpha_diff_cur[0] = -2 * mixture_prob_cur[0] / prob_cur[0]; Dtype t1, t2, t3; // mean1_diff t1 = alpha_diff_cur[0] * alpha_cur[0]; t2 = Dtype(1) / variance_cur[0] / (Dtype(1) - squareD(correlation_cur[0])); t3 = (label_cur[0] - mean_cur[0]) / variance_cur[0] - correlation_cur[0] * (label_cur[1] - mean_cur[1]) / variance_cur[1]; mean_diff_cur[0] = t1 * t2 * t3; // mean2_diff t2 = Dtype(1) / variance_cur[1] / (Dtype(1) - squareD(correlation_cur[0])); t3 = (label_cur[1] - mean_cur[1]) / variance_cur[1] - correlation_cur[0] * (label_cur[0] - mean_cur[0]) / variance_cur[0]; mean_diff_cur[1] = t1 * t2 * t3; // variance1_diff t2 = Dtype(1) / variance_cur[0] / (Dtype(1) - squareD(correlation_cur[0])) * (label_cur[0] - mean_cur[0]); t3 = ((label_cur[0] - mean_cur[0]) / variance_cur[0] - correlation_cur[0] * (label_cur[1] - mean_cur[1]) / variance_cur[1]); variance_diff_cur[0] = (t1 * (t2 * t3 - Dtype(1))) / variance_cur[0]; // varaince2_dif t2 = Dtype(1) / variance_cur[1] / (Dtype(1) - squareD(correlation_cur[0])) * (label_cur[1] - mean_cur[1]); t3 = ((label_cur[1] - mean_cur[1]) / variance_cur[1] - correlation_cur[0] * (label_cur[0] - mean_cur[0]) / variance_cur[0]); variance_diff_cur[1] = (t1 * (t2 * t3 - Dtype(1))) / variance_cur[1]; // correlation_diff t2 = (label_cur[0] - mean_cur[0]) * (label_cur[1] - mean_cur[1]) / variance_cur[0] / variance_cur[1]; Dtype Z = squareD((label_cur[0] - mean_cur[0]) / variance_cur[0]) + squareD((label_cur[1] - mean_cur[1]) / variance_cur[1]) - Dtype(2) * correlation_cur[0] / variance_cur[0] / variance_cur[1] * (label_cur[0] - mean_cur[0]) * (label_cur[1] - mean_cur[1]); t3 = correlation_cur[0] * (Dtype(1) - Dtype(1) /(Dtype(1) - squareD(correlation_cur[0])) * Z); correlation_diff_cur[0] = t1 * (t2 + t3) / (Dtype(1) - squareD(correlation_cur[0])); } } template <typename Dtype> void GMMLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = bottom[0]->count(); const Dtype* alpha = bottom[0]->gpu_data(); const Dtype* mean = bottom[1]->gpu_data(); const Dtype* variance = bottom[2]->gpu_data(); const Dtype* correlation = bottom[3]->gpu_data(); const Dtype* label = bottom[4]->gpu_data(); const Dtype* mixture_prob = N_.gpu_data(); const Dtype* prob = P_.gpu_data(); const Dtype loss_weight = top[0]->cpu_diff()[0]; Dtype* alpha_diff = bottom[0]->mutable_gpu_diff(); Dtype* mean_diff = bottom[1]->mutable_gpu_diff(); Dtype* variance_diff = bottom[2]->mutable_gpu_diff(); Dtype* correlation_diff = bottom[3]->mutable_gpu_diff(); GMMBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, alpha, mean, variance, correlation, label, mixture_prob, prob, 
num_mixtures_, loss_weight, alpha_diff, mean_diff, variance_diff, correlation_diff); } INSTANTIATE_LAYER_GPU_FUNCS(GMMLossLayer); } // namespace caffe
467473738fac5951e9d5290564c255fc5ec30cf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018-2020, Michael P. Howard // Copyright (c) 2021, Auburn University // This file is part of the azplugins project, released under the Modified BSD License. /*! * \file ImplicitPlaneEvaporatorGPU.cu * \brief Definition of kernel drivers and kernels for ImplicitPlaneEvaporatorGPU */ #include "ImplicitPlaneEvaporatorGPU.cuh" namespace azplugins { namespace gpu { namespace kernel { /*! * \param d_force Particle forces * \param d_virial Particle virial * \param d_pos Particle positions * \param d_params Per-type parameters * \param interf_origin Position of interface origin * \param N Number of particles * \param ntypes Number of types * * Using one thread per particle, the force of the harmonic potential is computed * per-particle. The per-particle-type parameters are cached into shared memory. * This method does not compute the virial. * */ __global__ void compute_implicit_evap_force(Scalar4 *d_force, Scalar *d_virial, const Scalar4 *d_pos, const Scalar4 *d_params, const Scalar interf_origin, const unsigned int N, const unsigned int ntypes) { // load per-type parameters into shared memory extern __shared__ Scalar4 s_params[]; for (unsigned int cur_offset = 0; cur_offset < ntypes; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ntypes) { s_params[cur_offset + threadIdx.x] = d_params[cur_offset + threadIdx.x]; } } __syncthreads(); // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const Scalar4 postype_i = d_pos[idx]; const Scalar z_i = postype_i.z; const unsigned int type_i = __scalar_as_int(postype_i.w); const Scalar4 params = s_params[type_i]; const Scalar k = params.x; const Scalar offset = params.y; const Scalar g = params.z; const Scalar cutoff = params.w; const Scalar dz = z_i - (interf_origin + offset); if (cutoff < Scalar(0.0) || dz < Scalar(0.0)) return; Scalar fz(0.0), e(0.0); if (dz < cutoff) // harmonic { fz = -k * dz; e = Scalar(-0.5) * fz * dz; // (k/2) dz^2 } else // linear { fz = -g; e = Scalar(0.5) * k * cutoff * cutoff + g * (dz - cutoff); } d_force[idx] = make_scalar4(0.0, 0.0, fz, e); } } // end namespace kernel /*! * \param d_force Particle forces * \param d_virial Particle virial * \param d_pos Particle positions * \param d_params Per-type parameters * \param interf_origin Position of interface origin * \param N Number of particles * \param ntypes Number of types * \param block_size Number of threads per block * * This kernel driver is a wrapper around kernel::compute_implicit_evap_force. * The forces and virial are both set to zero before calculation. 
*/ hipError_t compute_implicit_evap_force(Scalar4 *d_force, Scalar *d_virial, const Scalar4 *d_pos, const Scalar4 *d_params, const Scalar interf_origin, const unsigned int N, const unsigned int ntypes, const unsigned int block_size) { // zero the force and virial datasets before launch hipMemset(d_force, 0, sizeof(Scalar4)*N); hipMemset(d_virial, 0, 6*sizeof(Scalar)*N); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)kernel::compute_implicit_evap_force); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); unsigned int shared_size = sizeof(Scalar4) * ntypes; dim3 grid(N / run_block_size + 1); hipLaunchKernelGGL(( kernel::compute_implicit_evap_force), dim3(grid), dim3(run_block_size), shared_size, 0, d_force, d_virial, d_pos, d_params, interf_origin, N, ntypes); return hipSuccess; } } // end namespace gpu } // end namespace azplugins
467473738fac5951e9d5290564c255fc5ec30cf8.cu
// Copyright (c) 2018-2020, Michael P. Howard // Copyright (c) 2021, Auburn University // This file is part of the azplugins project, released under the Modified BSD License. /*! * \file ImplicitPlaneEvaporatorGPU.cu * \brief Definition of kernel drivers and kernels for ImplicitPlaneEvaporatorGPU */ #include "ImplicitPlaneEvaporatorGPU.cuh" namespace azplugins { namespace gpu { namespace kernel { /*! * \param d_force Particle forces * \param d_virial Particle virial * \param d_pos Particle positions * \param d_params Per-type parameters * \param interf_origin Position of interface origin * \param N Number of particles * \param ntypes Number of types * * Using one thread per particle, the force of the harmonic potential is computed * per-particle. The per-particle-type parameters are cached into shared memory. * This method does not compute the virial. * */ __global__ void compute_implicit_evap_force(Scalar4 *d_force, Scalar *d_virial, const Scalar4 *d_pos, const Scalar4 *d_params, const Scalar interf_origin, const unsigned int N, const unsigned int ntypes) { // load per-type parameters into shared memory extern __shared__ Scalar4 s_params[]; for (unsigned int cur_offset = 0; cur_offset < ntypes; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ntypes) { s_params[cur_offset + threadIdx.x] = d_params[cur_offset + threadIdx.x]; } } __syncthreads(); // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const Scalar4 postype_i = d_pos[idx]; const Scalar z_i = postype_i.z; const unsigned int type_i = __scalar_as_int(postype_i.w); const Scalar4 params = s_params[type_i]; const Scalar k = params.x; const Scalar offset = params.y; const Scalar g = params.z; const Scalar cutoff = params.w; const Scalar dz = z_i - (interf_origin + offset); if (cutoff < Scalar(0.0) || dz < Scalar(0.0)) return; Scalar fz(0.0), e(0.0); if (dz < cutoff) // harmonic { fz = -k * dz; e = Scalar(-0.5) * fz * dz; // (k/2) dz^2 } else // linear { fz = -g; e = Scalar(0.5) * k * cutoff * cutoff + g * (dz - cutoff); } d_force[idx] = make_scalar4(0.0, 0.0, fz, e); } } // end namespace kernel /*! * \param d_force Particle forces * \param d_virial Particle virial * \param d_pos Particle positions * \param d_params Per-type parameters * \param interf_origin Position of interface origin * \param N Number of particles * \param ntypes Number of types * \param block_size Number of threads per block * * This kernel driver is a wrapper around kernel::compute_implicit_evap_force. * The forces and virial are both set to zero before calculation. 
*/ cudaError_t compute_implicit_evap_force(Scalar4 *d_force, Scalar *d_virial, const Scalar4 *d_pos, const Scalar4 *d_params, const Scalar interf_origin, const unsigned int N, const unsigned int ntypes, const unsigned int block_size) { // zero the force and virial datasets before launch cudaMemset(d_force, 0, sizeof(Scalar4)*N); cudaMemset(d_virial, 0, 6*sizeof(Scalar)*N); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)kernel::compute_implicit_evap_force); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); unsigned int shared_size = sizeof(Scalar4) * ntypes; dim3 grid(N / run_block_size + 1); kernel::compute_implicit_evap_force<<<grid, run_block_size, shared_size>>>(d_force, d_virial, d_pos, d_params, interf_origin, N, ntypes); return cudaSuccess; } } // end namespace gpu } // end namespace azplugins
19143aa9873c8426ea65f4c64051b1a5697354f7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// NOTE: the three sections below (local, global, shared memory) are standalone
// examples, each with its own main(); build one section at a time.

/******* local memory *******/
__global__ void use_local_gpu_memory(float in){
    float x; // Variables "x" and "in" are in local memory and private to each thread.
    x = in;
}

int main(){
    hipLaunchKernelGGL((use_local_gpu_memory), dim3(128), dim3(1), 0, 0, 2.0f);
}

/****** Global memory *******/
__global__ void use_global_gpu_memory(float *array){
    array[threadIdx.x] = 2.0f * (float)threadIdx.x; // "array" is a pointer into global memory on the device.
}

int main(){
    float h_arr[128];
    float *d_arr;

    // allocate global memory on the device, place result in "d_arr"
    hipMalloc((void **) &d_arr, sizeof(float)*128);
    // Copy data from host memory "h_arr" to device memory "d_arr"
    hipMemcpy((void *)d_arr, (void *)h_arr, sizeof(float)*128, hipMemcpyHostToDevice);

    // Launch the kernel
    hipLaunchKernelGGL((use_global_gpu_memory), dim3(128), dim3(1), 0, 0, d_arr); // modifies the contents of d_arr

    // copy the modified array back to the host, overwriting contents of h_arr
    hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float)*128, hipMemcpyDeviceToHost);
}

/******* Shared memory *******/
__global__ void use_shared_gpu_memory(float *array){
    int i, index = threadIdx.x;
    float average, sum = 0.0f;

    // shared variables are visible to all threads in the thread block
    // and have the same lifetime as the thread block.
    __shared__ float sh_arr[128];

    // copy data from "array" in global memory to "sh_arr" in shared memory.
    sh_arr[index] = array[index];
    __syncthreads(); // ensure all the writes to shared memory have completed.

    // as an example, let's find the average of all previous elements.
    for(i = 0; i < index; i++){
        sum += sh_arr[i];
    }
    average = sum / (index + 1.0f);

    // if array[index] is greater than the average of array[0..index-1], replace it with the average.
    // since array[] is in global memory, this change will be seen by the host (and potentially
    // other thread blocks, if any)
    if (array[index] > average){ array[index] = average; }
    __syncthreads();
}

int main(){
    // same as global
}
19143aa9873c8426ea65f4c64051b1a5697354f7.cu
// NOTE: the three sections below (local, global, shared memory) are standalone
// examples, each with its own main(); build one section at a time.

/******* local memory *******/
__global__ void use_local_gpu_memory(float in){
    float x; // Variables "x" and "in" are in local memory and private to each thread.
    x = in;
}

int main(){
    use_local_gpu_memory<<<128, 1>>>(2.0f);
}

/****** Global memory *******/
__global__ void use_global_gpu_memory(float *array){
    array[threadIdx.x] = 2.0f * (float)threadIdx.x; // "array" is a pointer into global memory on the device.
}

int main(){
    float h_arr[128];
    float *d_arr;

    // allocate global memory on the device, place result in "d_arr"
    cudaMalloc((void **) &d_arr, sizeof(float)*128);
    // Copy data from host memory "h_arr" to device memory "d_arr"
    cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float)*128, cudaMemcpyHostToDevice);

    // Launch the kernel
    use_global_gpu_memory<<<128, 1>>>(d_arr); // modifies the contents of d_arr

    // copy the modified array back to the host, overwriting contents of h_arr
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float)*128, cudaMemcpyDeviceToHost);
}

/******* Shared memory *******/
__global__ void use_shared_gpu_memory(float *array){
    int i, index = threadIdx.x;
    float average, sum = 0.0f;

    // shared variables are visible to all threads in the thread block
    // and have the same lifetime as the thread block.
    __shared__ float sh_arr[128];

    // copy data from "array" in global memory to "sh_arr" in shared memory.
    sh_arr[index] = array[index];
    __syncthreads(); // ensure all the writes to shared memory have completed.

    // as an example, let's find the average of all previous elements.
    for(i = 0; i < index; i++){
        sum += sh_arr[i];
    }
    average = sum / (index + 1.0f);

    // if array[index] is greater than the average of array[0..index-1], replace it with the average.
    // since array[] is in global memory, this change will be seen by the host (and potentially
    // other thread blocks, if any)
    if (array[index] > average){ array[index] = average; }
    __syncthreads();
}

int main(){
    // same as global
}
5aba1b5d01e19de4c96214a0f1064cc197fa0de3.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include <legacy/NativeOpExecutioner.h> #include <legacy/NativeOps.h> #include <hip/hip_runtime.h> #include <system/buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <execution/AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> #include <graph/GraphExecutioner.h> #include <helpers/BlasHelper.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <helpers/PointersManager.h> //#include <sys/time.h> #include <hiprand/hiprand.h> #include <graph/Status.h> #include <helpers/DebugHelper.h> using namespace sd; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) { auto scalarShapeInfo = shape::createScalarShapeInfo(); auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); sd::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: sd::buffer::Buffer<Nd4jLong> *scalarDimension; sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { sd::buffer::freeBuffer(&scalarShapeInfo); sd::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { sd::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = sd::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; sd::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { sd::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { sd::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), 
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, bool biasCorrected) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execBroadcastBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int 
*>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcastBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void execBroadcast( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcast(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSameScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceLong(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo, extraParams, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hXShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo, extraParams, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo, nullptr, 0, 
reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed"); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ //////////////////////////////////////////////////////////////////////// void execIndexReduce(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduce(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), (int *) dbDimension->special(), dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){ try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformSame(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? 
extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformBool(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformAny(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]); LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3], reinterpret_cast<int *>(extraPointers[6])); NativeOpExecutioner::execTransformAny(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, nullptr, nullptr); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformStrict(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = 
reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformStrict(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformFloat(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void checkP2P() { int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; hipSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; hipSetDevice(dX); hipDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { hipDeviceEnablePeerAccess(dY, 0); } else { hipDeviceDisablePeerAccess(dY); } } else { if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } hipSetDevice(curDevice); } allowedP2P = enable; hipSetDevice(curDevice); } bool isP2PAvailable() { return supportedP2P; } void initializeDevicesAndFunctions() { try { int devCnt 
= 0; hipGetDeviceCount(&devCnt); deviceProperties = new hipDeviceProp_t[devCnt]; for (int i = 0; i < devCnt; i++) { hipSetDevice(i); hipGetDeviceProperties(&deviceProperties[i], i); hipDeviceSetLimit(hipLimitStackSize, 4096); } hipSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void initializeFunctions(Nd4jPointer *functions) { sd::BlasHelper::getInstance().initializeDeviceFunctions(functions); /* hipblasSgemv = (CublasSgemv)functions[0]; hipblasDgemv = (CublasDgemv)functions[1]; hipblasHgemm = (CublasHgemm)functions[2]; hipblasSgemm = (CublasSgemm)functions[3]; hipblasDgemm = (CublasDgemm)functions[4]; cublasSgemmEx = (CublasSgemmEx)functions[5]; hipblasHgemmBatched = (CublasHgemmBatched)functions[6]; hipblasSgemmBatched = (CublasSgemmBatched)functions[7]; hipblasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // hipHostMallocMapped |hipHostMallocPortable auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) { Nd4jPointer pointer; auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int freeHost(Nd4jPointer pointer) { auto res = hipHostFree(reinterpret_cast<void *>(pointer)); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed"); } return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int freeDevice(Nd4jPointer pointer, int deviceId) { auto res = hipFree(reinterpret_cast<void *>(pointer)); // we're intentionally skipping if (res != 0 && res != 1) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed"); } return res == 0 ? 
1L : 0L; } Nd4jPointer createContext() { return 0L; } Nd4jPointer createStream() { auto stream = new hipStream_t(); auto dZ = hipStreamCreate(stream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamCreate failed"); } return stream; } Nd4jPointer createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t)); auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed"); } return nativeEvent; } int registerEvent(Nd4jPointer event, Nd4jPointer stream) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto pStream = reinterpret_cast<hipStream_t *>(stream); auto dZ = hipEventRecord(*pEvent, *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed"); } return 1; } int setDevice(int deviceId) { AffinityManager::setCurrentDevice(deviceId); return 1; } Nd4jLong getDeviceFreeMemoryDefault() { size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); return (Nd4jLong) memFree; } Nd4jLong getDeviceFreeMemory(int device) { int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong getDeviceTotalMemory(int device) { int orig = -1; hipGetDevice(&orig); if (device >= 0 && device != orig) { hipSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; hipMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { hipSetDevice(orig); } return (Nd4jLong) memTotal; } int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipMemcpyKind kind; switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } break; case 3: { kind = hipMemcpyDeviceToDevice; } break; default: { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpy failed"); return 0; } return 1; } int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<hipStream_t *>(reserved); hipMemcpyKind kind; //sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed"); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } break; case 
3: { kind = hipMemcpyDeviceToDevice; } break; default: { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); //auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed"); return 0; } return 1; } int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemset failed"); } return 1; } int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<hipStream_t *>(reserved); auto dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemsetAsync failed"); } return 1; } int destroyEvent(Nd4jPointer event) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto dZ = hipEventDestroy(*pEvent); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed"); } return 1; } int streamSynchronize(Nd4jPointer stream) { auto pStream = reinterpret_cast<hipStream_t *>(stream); auto dZ = hipStreamSynchronize(*pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamSynchronize failed"); } return 1L; } int eventSynchronize(Nd4jPointer event) { auto pEvent = reinterpret_cast<hipEvent_t *>(&event); auto dZ = hipEventSynchronize(*pEvent); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed"); } return 1L; } int getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void enableDebugMode(bool reallyEnable) { sd::Environment::getInstance().setDebug(reallyEnable); } void setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int ompGetMaxThreads() { return maxThreads; } int ompGetNumThreads() { return maxThreads; } void setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void enableVerboseMode(bool reallyEnable) { sd::Environment::getInstance().setVerbose(reallyEnable); } int getDeviceMajor(int device) { return deviceProperties[device].major; } int getDeviceMinor(int device) { return deviceProperties[device].minor; } const char * 
getDeviceName(int device) { return deviceProperties[device].name; } void specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { try { BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods, ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo), LIBND4J_TYPES); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * This method builds and returns a TadPack (TAD shape info and offsets) for the given shape info and dimensions. */ sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) { try { auto pack = new TadPack(); *pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength); return pack; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) { return pack->primaryShapeInfo(); } Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) { return pack->primaryOffsets(); } Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) { return pack->specialShapeInfo(); } Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) { return pack->specialOffsets(); } Nd4jLong getNumberOfTads(sd::TadPack* pack) { return pack->numberOfTads(); } int getShapeInfoLength(sd::TadPack* pack) { return pack->shapeInfoLength(); } int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved); hipMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } break; case 3: { kind = hipMemcpyDeviceToDevice; } break; } auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed"); } return 1; } Nd4jPointer getConstantSpace() { Nd4jPointer dConstAddr; hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed"); } return dConstAddr; } void pullRows(Nd4jPointer *extraPointers, OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* zTadShapeInfo, Nd4jLong const* zTadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); 
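/* Descriptive note: pullRowsKernelGeneric copies the TADs selected by 'indexes' from the device buffer of dbX into the device buffer of dbZ, using the x/z TAD shape info and offsets supplied by the caller, on the stream taken from extraPointers[1]. */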
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong const* xShapeInfo, Nd4jPointer *dx, Nd4jLong const* dXShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *dz, Nd4jLong const* dzShapeInfo, int n, Nd4jLong length, bool propagate) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong const* xShapeInfo, Nd4jPointer *dx, Nd4jLong const* dXShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *dz, Nd4jLong const* dzShapeInfo, int n, Nd4jLong length) { try { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = sd::ArrayOptions::dataType(xShape[0]); dim3 launchDims(256, 512, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "shuffle(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } bool isExperimentalEnabled() { return sd::Environment::getInstance().isExperimentalBuild(); } void setOmpMinThreads(int threads) { minThreads = sd::math::nd4j_max<int>(32, threads); minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads); } int getDevice() { return sd::AffinityManager::currentDeviceId(); } void setElementThreshold(int num) { // this is no-op for CUDA } void setTADThreshold(int num) { // this is no-op for CUDA } //////////////////////////////////////////////////////////////////////// void execSummaryStats(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, bool biasCorrected) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, bool biasCorrected, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), reinterpret_cast<int *>(dbDimension->special()), dimensionLength, tadShapeInfo, tadOffsets, biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong 
const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Tad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape)); auto tadLength = shape::length(tadPack.primaryShapeInfo()); auto yLength = shape::length(hYShapeInfo); auto xLength = shape::length(hXShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); if (tadLength == yLength || tadLength == xLength) { // nd4j_printf("== way\n",""); NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } else NativeOpExecutioner::execReduce3TAD(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } 
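/* Illustrative sketch, kept as a comment so it does not change the build: the dispatch rule used by execReduce3Tad above builds the TAD pack for the requested dimensions and, when the resulting TAD length equals the full length of x or y, every TAD spans a whole array, so the plain reduce3 path is enough; otherwise the per-TAD kernel is used. The helper name reducesWholeArrays is hypothetical and not part of the exported API.

static bool reducesWholeArrays(Nd4jLong const* hXShapeInfo, Nd4jLong const* hYShapeInfo, int* dimension, int dimensionLength) {
    // Build the TAD pack exactly as execReduce3Tad does.
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, dimensionLength);
    auto tadLength = shape::length(tadPack.primaryShapeInfo());
    // When a single TAD covers all of y (or all of x), the reduction is effectively over whole arrays.
    return tadLength == shape::length(hYShapeInfo) || tadLength == shape::length(hXShapeInfo);
}
*/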
//////////////////////////////////////////////////////////////////////// void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3Scalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, dbX->primary(), 
hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled()) throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, 
dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, sd::DataType dtype) { } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, sd::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, sd::DataType dtype) { } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom3(Nd4jPointer 
*extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // hipStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory sd::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); sd::DebugHelper::checkErrorCode(stream, "initRandom(...) 
failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size sd::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); hipStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) { try { InteropDataBuffer::prepareSpecialUse({}, {dbX}); hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); InteropDataBuffer::registerSpecialUse({}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<hipStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (sd::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = sd::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( sd::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { hipLaunchKernelGGL(( sd::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParamsVals, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets, Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParamsVals, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), reinterpret_cast<int *>(dbDimension->special()), dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, bool descending) { try { hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } sd::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto yLength = shape::length(yShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(xShapeInfo); auto yType = sd::ArrayOptions::dataType(yShapeInfo); if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return; if (xLength != yLength) throw std::runtime_error("sortByKey: keys and values must have the same size"); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto yLength = shape::length(yShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(yShapeInfo); auto yType = sd::ArrayOptions::dataType(xShapeInfo); if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return; if (xLength != yLength) throw std::runtime_error("sortByValue: keys and values must have the same size"); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { 
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = sd::ArrayOptions::dataType(xShapeInfo); auto yType = sd::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = sd::ArrayOptions::dataType(yShapeInfo); auto yType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, const Nd4jLong *xShapeInfo) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } void ravelMultiIndex(Nd4jPointer *extraPointers, Nd4jLong *indices, Nd4jLong *flatIndices, Nd4jLong length, Nd4jLong *shapeInfo, int mode) { throw std::runtime_error("ravelMultiIndex:: Not implemented yet"); } void unravelIndex(Nd4jPointer *extraPointers, Nd4jLong *indices, Nd4jLong *flatIndices, Nd4jLong length, Nd4jLong *shapeInfo) { throw std::runtime_error("unravelIndex:: Not implemented yet"); } Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { try { return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) { return ptr->size(); } Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) { return ptr->pointer(); } const char* getAllCustomOps() { return sd::ops::OpRegistrator::getInstance().getAllCustomOperations(); } sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) { sd::graph::VariableSpace varSpace; Context block(2, &varSpace); sd::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numDArgs; e++) block.getDArguments()->push_back((sd::DataType) dArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we 
shouldn't copy buffer if that's empty array void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes]; auto array = new sd::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); sd::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(sd::ShapeList* list) { return list->size(); } Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<sd::NDArray*> inputs(numInputs); std::vector<sd::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e]; void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs]; inputs[e] = new sd::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new sd::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); auto context = reinterpret_cast<Context *>(opContext); auto result = op->execute(context); auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream()); if (res != 0) throw sd::cuda_exception::build("customOp execution failed", res); for (auto v:context->fastpath_in()) { if (!v->isEmpty()) v->syncToDevice(); } for (auto v:context->fastpath_out()) { if (!v->isEmpty()) v->syncToDevice(); } return result; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { try { auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph); return ND4J_STATUS_OK; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<sd::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new sd::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) { return set->status(); } sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(sd::graph::Variable* variable) { return variable->id(); } int getVariableIndex(sd::graph::Variable* variable) { return variable->index(); } const char* getVariableName(sd::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong const* getVariableShape(sd::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(sd::graph::Variable* variable) { return variable->getNDArray()->buffer(); } int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { try { sd::graph::GraphHolder::getInstance().dropGraphAny(graphId); return ND4J_STATUS_OK; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deletePointerArray(Nd4jPointer pointer) { 
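// releases a natively-allocated Nd4jPointer[] handle array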
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void deleteCharArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<char *>(pointer); delete[] ptr; } void deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } void deleteVariablesSet(sd::graph::VariablesSet* pointer) { delete pointer; } void deleteShapeList(Nd4jPointer shapeList) { sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList); //list->destroy(); delete list; } const char* getAllOperations() { return sd::OpTracker::getInstance().exportOperations(); } Nd4jPointer getGraphState(Nd4jLong id) { return (Nd4jPointer) new sd::graph::GraphState(id); } void deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<sd::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<sd::int8, sd::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == 
ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... 
} else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //sd::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //sd::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //sd::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new sd::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<sd::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<sd::utf8string*>(ptr)->_buffer; } void deleteUtf8String(Nd4jPointer *extraPointers, 
Nd4jPointer ptr) { delete(reinterpret_cast<sd::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T, typename I> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* vindexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; auto indexes = reinterpret_cast<const I*>(vindexes); for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T, typename I> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong const* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T, I>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets, void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets, void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets, void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets, void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) { try { auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]); auto type = ArrayOptions::dataType(hXShapeInfo); auto iType = ArrayOptions::dataType(hIndicesShapeInfo); BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes), LIBND4J_TYPES, INDEXING_TYPES); sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); sd::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { hipStream_t stream; hipStreamCreate(&stream); hipLaunchKernelGGL(( tryPointerKernel) , dim3(256), dim3(512), len + 64, stream, p, len); auto e = hipStreamSynchronize(stream); if (e != 0) throw sd::cuda_exception::build("tryPointer failed", e); hipStreamDestroy(stream); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int dataTypeFromNpyHeader(void *header) { return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } OpaqueConstantShapeBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) { return shapeBufferEx(rank, shape, strides, dtype, order, ews, empty ? ARRAY_EMPTY : 0); } OpaqueConstantShapeBuffer* shapeBufferEx(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, Nd4jLong extras) { try { auto buffer = new ConstantShapeBuffer(); *buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, extras)); return buffer; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer* ptr) { delete ptr; } void deleteConstantDataBuffer(OpaqueConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(sd::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()._blasMajorVersion && minor == Environment::getInstance()._blasMinorVersion && build == Environment::getInstance()._blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion, Environment::getInstance()._blasPatchVersion, major, minor, build); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch"); } return result; } sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) { return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype); } sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) { return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype); } sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) { return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype); } Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) { return dbf->primary(); } Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) { return dbf->special(); } Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) { return dbf->length(); } Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) { return dbf->sizeOf(); } Nd4jPointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer* dbf) { return const_cast<Nd4jLong*>(dbf->primary()); } Nd4jPointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer* dbf) { return const_cast<Nd4jLong*>(dbf->special()); } sd::graph::Context* createGraphContext(int nodeId) { return new sd::graph::Context(nodeId); } sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) { return &ptr->randomGenerator(); } void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); } void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); } void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo); } void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo); } void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); } void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); } void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); } void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) { std::vector<sd::DataType> 
dtypes(numberOfArguments); for (int e = 0; e < numberOfArguments; e++) dtypes[e] = (sd::DataType) arguments[e]; ptr->setDArguments(dtypes); } void deleteGraphContext(sd::graph::Context* ptr) { delete ptr; } sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { try { return new sd::graph::RandomGenerator(rootSeed, nodeSeed); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) { return ptr->rootState(); } Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) { return ptr->nodeState(); } void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); } float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeT<float>(index); } double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeT<double>(index); } int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); } Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); } void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) { delete ptr; } Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) { try { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int shapeSize = arr.shape.size(); std::vector<Nd4jLong> shape(shapeSize); bool _empty = false; for (unsigned int i = 0; i < shapeSize; i++) { shape[i] = arr.shape[i]; if (arr.shape[i] == 0) _empty = true; } auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray)); Nd4jLong *shapeBuffer; if (shape.size() == 1 && shape[0] == 0) { // scalar case shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype); } else if (_empty) { if (shapeSize > 0) shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); else shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype); } else { shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 
'f' : 'c', shape); } return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(shapeBuffer, true)); // TO DO: this can lead to unpleasant crash sometimes } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runLightBenchmarkSuit(bool printOut) { try { sd::LightBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runFullBenchmarkSuit(bool printOut) { try { sd::FullBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getCachedMemory(int deviceId) { return sd::ConstantHelper::getInstance().getCachedAmount(deviceId); } sd::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); } Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); } Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); } Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); } Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); } Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); } Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); } Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); } int lastErrorCode() { return sd::LaunchContext::defaultContext()->errorReference()->errorCode(); } const char* lastErrorMessage() { return sd::LaunchContext::defaultContext()->errorReference()->errorMessage(); } void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) { ptr->setShapeFunctionOverride(reallyOverride); } void ctxPurge(OpaqueContext* ptr) { ptr->clearFastPath(); } int binaryLevel() { return 0; } int optimalLevel() { return 0; } bool isMinimalRequirementsMet() { return true; } bool isOptimalRequirementsMet() { return true; } void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); } void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) { if (execMode < 0 || execMode > 2) execMode = 0; ptr->setExecutionMode((samediff::ExecutionMode) execMode); } OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) { auto buffer = dbAllocateDataBuffer(0, dataType, false); if (primary != nullptr) buffer->setPrimary(primary, elements); if (special != nullptr) buffer->setSpecial(special, elements); return buffer; } OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) { return allocateDataBuffer(elements, dataType, allocateBoth); } OpaqueDataBuffer* 
allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) { try { auto dtype = DataTypeUtils::fromInt(dataType); return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->primary(); } Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->special(); } void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) { delete dataBuffer; } void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) { dataBuffer->setPrimary(primaryBuffer, numBytes); } void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) { dataBuffer->setSpecial(specialBuffer, numBytes); } void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocatePrimary(); } void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocateSpecial(); } void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) { try { dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType())); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) { return new InteropDataBuffer(*dataBuffer, length, offset); } void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToSpecial(); } void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToPrimary(nullptr); } void dbTickHostRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readPrimary(); } void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writePrimary(); } void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readSpecial(); } void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writeSpecial(); } void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) { dataBuffer->expand(elements); } void dbClose(OpaqueDataBuffer *dataBuffer) { dataBuffer->getDataBuffer()->close(); } int dbDeviceId(OpaqueDataBuffer *dataBuffer) { return dataBuffer->deviceId(); } void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) { dataBuffer->setDeviceId(deviceId); } int dbLocality(OpaqueDataBuffer *dataBuffer) { auto p = dataBuffer->dataBuffer()->isPrimaryActual(); auto d = dataBuffer->dataBuffer()->isSpecialActual(); if (p && d) return 0; else if (p) return -1; else return 1; }
5aba1b5d01e19de4c96214a0f1064cc197fa0de3.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include <legacy/NativeOpExecutioner.h> #include <legacy/NativeOps.h> #include <cuda.h> #include <system/buffer.h> #include <loops/transform_any.h> #include <loops/reduce_bool.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <helpers/threshold.h> #include <ops/specials_cuda.h> #include <helpers/DebugHelper.h> #include <execution/AffinityManager.h> #include <exceptions/datatype_exception.h> #include <exceptions/cuda_exception.h> #include <helpers/CudaLaunchHelper.h> #include <graph/GraphExecutioner.h> #include <helpers/BlasHelper.h> #include <graph/GraphHolder.h> #include <ops/declarable/CustomOperations.h> #include <helpers/PointersManager.h> //#include <sys/time.h> #include <curand.h> #include <graph/Status.h> #include <helpers/DebugHelper.h> using namespace sd; #include <loops/special_kernels.h> #include <performance/benchmarking/FullBenchmarkSuit.h> #include <performance/benchmarking/LightBenchmarkSuit.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __ND4J_EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jLong)ptrToDeviceId; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } /* * This message returns shared memory threshold value. 
default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) { auto scalarShapeInfo = shape::createScalarShapeInfo(); auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); sd::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: sd::buffer::Buffer<Nd4jLong> *scalarDimension; sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo; // std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong))); CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); // threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { sd::buffer::freeBuffer(&scalarShapeInfo); sd::buffer::freeBuffer(&scalarDimension); } Nd4jLong *getShapeInfoHostPointer() { return scalarShapeInfo->data; } Nd4jLong * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } Nd4jLong * getDimensionHostPointer() { return scalarDimension->data; } Nd4jLong * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { sd::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T))); CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = sd::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; sd::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { sd::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ Nd4jLong *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the dZ pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ Nd4jLong *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { sd::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; void execPairwiseTransform( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), 
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execPairwiseTransformBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsScalar(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, bool biasCorrected) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execBroadcastBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int 
*>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcastBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param dY * @param dYShapeInfo * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ void execBroadcast( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]); auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]); auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hYShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execBroadcast(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloatScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSameScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceSame2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceLong(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceLong(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::INT64) throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction, ::execReduceScalar(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo, extraParams, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hXShapeInfo, nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES); sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) 
failed"); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduceBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]); auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (zType != sd::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type"); auto xLength = shape::length(hXShapeInfo); auto blockWidth = 256; auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth); dim3 launchDims(numBlocks, blockWidth, 32768); BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction, ::execReduceScalar(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo, extraParams, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo, nullptr, 0, 
reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES); sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed"); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo * @param dimension * @param dimensionLength */ //////////////////////////////////////////////////////////////////////// void execIndexReduce(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduce(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), (int *) dbDimension->special(), dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams * @param dZ * @param dZShapeInfo */ //////////////////////////////////////////////////////////////////////// void execReduceFloat2(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); const auto zLen = shape::length(hZShapeInfo); std::vector<int> dimensions(dimension, dimension + dimensionLength); const Nd4jLong* zShapeInfoH = hZShapeInfo; if(shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) { auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions); zShapeInfoH = reinterpret_cast<Nd4jLong const*>(zPack.primary()); } std::vector<int> dims = (zLen != 1) ? 
ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>(); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduceFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(), dims.data(), dims.size()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * * @param opNum * @param dX * @param dXShapeInfo * @param extraParams */ //////////////////////////////////////////////////////////////////////// void execIndexReduceScalar( Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){ try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execIndexReduceScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformSame(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? 
extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformBool(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformAny(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]); LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3], reinterpret_cast<int *>(extraPointers[6])); NativeOpExecutioner::execTransformAny(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, nullptr, nullptr); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformStrict(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = 
reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformStrict(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execTransformFloat(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr); auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execTransformFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams, tadShapeInfo, tadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void checkP2P() { int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; cudaSetDevice(dX); cudaDeviceCanAccessPeer(&canAccess, dX , dY); if (!canAccess) { tempSupport = false; break; } } } supportedP2P = tempSupport; cudaSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; cudaGetDevice(&curDevice); int devCnt = 0; cudaGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int dX = 0; dX < devCnt; dX++) { for (int dY = 0; dY < devCnt; dY++) { if (dX == dY) continue; int canAccess = 0; cudaSetDevice(dX); cudaDeviceCanAccessPeer(&canAccess, dX , dY); if (canAccess) { if (enable) { cudaDeviceEnablePeerAccess(dY, 0); } else { cudaDeviceDisablePeerAccess(dY); } } else { if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY); } } } cudaSetDevice(curDevice); } allowedP2P = enable; cudaSetDevice(curDevice); } bool isP2PAvailable() { return supportedP2P; } void initializeDevicesAndFunctions() { try 
{ int devCnt = 0; cudaGetDeviceCount(&devCnt); deviceProperties = new cudaDeviceProp[devCnt]; for (int i = 0; i < devCnt; i++) { cudaSetDevice(i); cudaGetDeviceProperties(&deviceProperties[i], i); cudaDeviceSetLimit(cudaLimitStackSize, 4096); } cudaSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void initializeFunctions(Nd4jPointer *functions) { sd::BlasHelper::getInstance().initializeDeviceFunctions(functions); /* cublasSgemv = (CublasSgemv)functions[0]; cublasDgemv = (CublasDgemv)functions[1]; cublasHgemm = (CublasHgemm)functions[2]; cublasSgemm = (CublasSgemm)functions[3]; cublasDgemm = (CublasDgemm)functions[4]; cublasSgemmEx = (CublasSgemmEx)functions[5]; cublasHgemmBatched = (CublasHgemmBatched)functions[6]; cublasSgemmBatched = (CublasSgemmBatched)functions[7]; cublasDgemmBatched = (CublasDgemmBatched)functions[8]; */ } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) { Nd4jPointer pointer; // cudaHostAllocMapped |cudaHostAllocPortable auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method acquires memory chunk of requested size on specified device * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc * @param flags optional parameter */ Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) { Nd4jPointer pointer; auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed"); } return reinterpret_cast<int8_t*>(pointer); } /** * This method releases previously allocated host memory space * * @param pointer pointer that'll be freed */ int freeHost(Nd4jPointer pointer) { auto res = cudaFreeHost(reinterpret_cast<void *>(pointer)); if (res != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed"); } return 1L; } /** * This method releases previously allocated memory space on device * * @param pointer pointer that'll be freed * @param ptrToDeviceId pointer to deviceId. */ int freeDevice(Nd4jPointer pointer, int deviceId) { auto res = cudaFree(reinterpret_cast<void *>(pointer)); // we're intentionally skipping if (res != 0 && res != 1) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed"); } return res == 0 ? 
1L : 0L; } Nd4jPointer createContext() { return 0L; } Nd4jPointer createStream() { auto stream = new cudaStream_t(); auto dZ = cudaStreamCreate(stream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed"); } return stream; } Nd4jPointer createEvent() { Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t)); CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t)); auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed"); } return nativeEvent; } int registerEvent(Nd4jPointer event, Nd4jPointer stream) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto pStream = reinterpret_cast<cudaStream_t *>(stream); auto dZ = cudaEventRecord(*pEvent, *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed"); } return 1; } int setDevice(int deviceId) { AffinityManager::setCurrentDevice(deviceId); return 1; } Nd4jLong getDeviceFreeMemoryDefault() { size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); return (Nd4jLong) memFree; } Nd4jLong getDeviceFreeMemory(int device) { int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memFree; } Nd4jLong getDeviceTotalMemory(int device) { int orig = -1; cudaGetDevice(&orig); if (device >= 0 && device != orig) { cudaSetDevice(device); } size_t memFree = 0; size_t memTotal = 0; cudaMemGetInfo(&memFree, &memTotal); if (device >= 0 && device != orig) { cudaSetDevice(orig); } return (Nd4jLong) memTotal; } int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaMemcpyKind kind; switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } break; case 3: { kind = cudaMemcpyDeviceToDevice; } break; default: { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpy failed"); return 0; } return 1; } int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<cudaStream_t *>(reserved); cudaMemcpyKind kind; //sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed"); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = 
cudaMemcpyDeviceToHost; } break; case 3: { kind = cudaMemcpyDeviceToDevice; } break; default: { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY"); return 0; } } auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream); //auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind); if (dZ != 0) { printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ)); fflush(stdout); fflush(stderr); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed"); return 0; } return 1; } int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size)); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed"); } return 1; } int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) { auto pStream = reinterpret_cast<cudaStream_t *>(reserved); auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed"); } return 1; } int destroyEvent(Nd4jPointer event) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto dZ = cudaEventDestroy(*pEvent); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed"); } return 1; } int streamSynchronize(Nd4jPointer stream) { auto pStream = reinterpret_cast<cudaStream_t *>(stream); auto dZ = cudaStreamSynchronize(*pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed"); } return 1L; } int eventSynchronize(Nd4jPointer event) { auto pEvent = reinterpret_cast<cudaEvent_t *>(&event); auto dZ = cudaEventSynchronize(*pEvent); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed"); } return 1L; } int getAvailableDevices() { int devCnt = 0; cudaGetDeviceCount(&devCnt); return devCnt; } void enableDebugMode(bool reallyEnable) { sd::Environment::getInstance().setDebug(reallyEnable); } void setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int ompGetMaxThreads() { return maxThreads; } int ompGetNumThreads() { return maxThreads; } void setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void enableVerboseMode(bool reallyEnable) { sd::Environment::getInstance().setVerbose(reallyEnable); } int getDeviceMajor(int device) { return deviceProperties[device].major; } int getDeviceMinor(int device) { return 
deviceProperties[device].minor; } const char * getDeviceName(int device) { return deviceProperties[device].name; } void specialConcat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *dZ, Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { try { BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods, ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo), LIBND4J_TYPES); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } /** * This method saves */ sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) { try { auto pack = new TadPack(); *pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength); return pack; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) { return pack->primaryShapeInfo(); } Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) { return pack->primaryOffsets(); } Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) { return pack->specialShapeInfo(); } Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) { return pack->specialOffsets(); } Nd4jLong getNumberOfTads(sd::TadPack* pack) { return pack->numberOfTads(); } int getShapeInfoLength(sd::TadPack* pack) { return pack->shapeInfoLength(); } int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) { cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved); cudaMemcpyKind kind; DEBUG_KERNEL(pStream, -1); switch (flags) { case 0: { kind = cudaMemcpyHostToHost; } break; case 1: { kind = cudaMemcpyHostToDevice; } break; case 2: { kind = cudaMemcpyDeviceToHost; } case 3: { kind = cudaMemcpyDeviceToDevice; } break; } auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed"); } return 1; } Nd4jPointer getConstantSpace() { Nd4jPointer dConstAddr; cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory); if (dZ != 0) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed"); } return dConstAddr; } void pullRows(Nd4jPointer *extraPointers, OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo, Nd4jLong n, Nd4jLong *indexes, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* zTadShapeInfo, Nd4jLong const* zTadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); dim3 launchDims(64, 256, 1024); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric, (launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, 
zTadShapeInfo, zTadOffsets), LIBND4J_TYPES); DEBUG_KERNEL(stream, -1); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void average(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong const* xShapeInfo, Nd4jPointer *dx, Nd4jLong const* dXShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *dz, Nd4jLong const* dzShapeInfo, int n, Nd4jLong length, bool propagate) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("averageFloat called\n"); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(256, 256, 4096); BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate), LIBND4J_TYPES); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void accumulate(Nd4jPointer *extras, Nd4jPointer *x, Nd4jLong const* xShapeInfo, Nd4jPointer *dx, Nd4jLong const* dXShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *dz, Nd4jLong const* dzShapeInfo, int n, Nd4jLong length) { try { auto stream = reinterpret_cast<cudaStream_t *>(extras[1]); int mode = getDeviceId(extras[3]); auto dX = reinterpret_cast<void **>(dx); if (sd::Environment::getInstance().isDebugAndVerbose()) printf("accumulateFloat called\n"); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // launching on gpu if (mode == 0) { dim3 launchDims(n, 256, 16384); BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed"); } else { // launching on host memory BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length), LIBND4J_TYPES); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void shuffle(Nd4jPointer *extras, Nd4jPointer *x, Nd4jPointer *xShapeInfo, Nd4jPointer *dx, Nd4jPointer *dXShapeInfo, Nd4jPointer *z, Nd4jPointer *zShapeInfo, Nd4jPointer *dz, Nd4jPointer *dZShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); auto dX = reinterpret_cast<void **>(dx); auto dZ = reinterpret_cast<void **>(dz); auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo); auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo); auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo); auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets); auto xType = sd::ArrayOptions::dataType(xShape[0]); dim3 launchDims(256, 512, 8192); BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric, (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "shuffle(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } bool isExperimentalEnabled() { return sd::Environment::getInstance().isExperimentalBuild(); } void setOmpMinThreads(int threads) { minThreads = sd::math::nd4j_max<int>(32, threads); minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads); } int getDevice() { return sd::AffinityManager::currentDeviceId(); } void setElementThreshold(int num) { // this is no-op for CUDA } void setTADThreshold(int num) { // this is no-op for CUDA } //////////////////////////////////////////////////////////////////////// void execSummaryStats(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, bool biasCorrected) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execSummaryStatsTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, bool biasCorrected, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), reinterpret_cast<int *>(dbDimension->special()), dimensionLength, tadShapeInfo, tadOffsets, biasCorrected); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong 
const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execReduce3Tad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape)); auto tadLength = shape::length(tadPack.primaryShapeInfo()); auto yLength = shape::length(hYShapeInfo); auto xLength = shape::length(hXShapeInfo); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); if (tadLength == yLength || tadLength == xLength) { // nd4j_printf("== way\n",""); NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } else NativeOpExecutioner::execReduce3TAD(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength, tadOnlyShapeInfo, yTadOffsets, yTadOnlyShapeInfo, yTadOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } 
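// Editorial note (added during editing, not part of the original file): every exec* wrapper in this
// translation unit follows the same four-step pattern, condensed below. The extraPointers slots named
// here are taken from the surrounding code (CUDA stream at [1], reduction buffer at [4], TAD shape/offset
// buffers at [9]..[13]); treating slots [3] and [5] as scratch/allocation pointers is an assumption.
//
//     InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, ...});     // ensure device copies of the inputs are current
//     LaunchContext lc(extraPointers[1], extraPointers[4],
//                      extraPointers[5], extraPointers[3]);        // stream, reduction buffer, scratch pointers
//     NativeOpExecutioner::exec...(&lc, opNum, /* host + device buffers and shape infos */ ...);
//     InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, ...});    // tick read/write counters after the launch
//
// Errors never propagate as C++ exceptions across this boundary: each wrapper catches std::exception and
// records error code 1 plus the message on the default LaunchContext's errorReference.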
//////////////////////////////////////////////////////////////////////// void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3Scalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special()); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBool(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarBoolTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalarBool(&lc, opNum, dbX->primary(), 
hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalar(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execScalarTad(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xType = sd::ArrayOptions::dataType(hXShapeInfo); auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo); auto zType = sd::ArrayOptions::dataType(hZShapeInfo); if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled()) throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType); dim3 launchDims(256, 256, 16384); #ifdef __ND4J_EXPERIMENTAL__ BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, 
extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES); #else BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES); #endif DEBUG_KERNEL(stream, opNum); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void execAggregate(Nd4jPointer *extraPointers, int opNum, void **arguments, int numArguments, Nd4jLong **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, void *realArguments, int numRealArguments, sd::DataType dtype) { } void batchExecutor(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, sd::DataType dtype) { } void execAggregateBatch(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments, sd::DataType dtype) { } //////////////////////////////////////////////////////////////////////// void execRandom(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } //////////////////////////////////////////////////////////////////////// void 
execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, void *extraArguments) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY}); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraArguments); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // cudaStreamSynchronize(*stream); auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev)); buffer->propagateToDevice(buffer, *stream); sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A"); // we generate sequence in the host memory sd::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream); sd::DebugHelper::checkErrorCode(stream, "initRandom(...) 
failed B"); return buffer; } void destroyRandom(Nd4jPointer ptrBuffer) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice cudaDeviceSynchronize(); delete buffer; } void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); cudaStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size sd::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream); } void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); cudaStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); } /** * Return the length of a shape buffer * based on the pointer * @param buffer the buffer pointer to check * @return */ int lengthForShapeBufferPointer(Nd4jPointer buffer) { auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer); return shape::shapeInfoLength(shape::rank(shapeBuffer)); } /** * The pointer to get the address for * * @param address the address to get the pointer * @return the pointer for the given address */ Nd4jPointer pointerForAddress(Nd4jLong address) { return reinterpret_cast<Nd4jPointer >(address); } void tear(Nd4jPointer *extras, OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo, Nd4jPointer *targets, Nd4jLong const* zShapeInfo, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) { try { InteropDataBuffer::prepareSpecialUse({}, {dbX}); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]); dim3 launchDims(512, 512, 512); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric, (launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) 
failed"); InteropDataBuffer::registerSpecialUse({}, {dbX}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) { auto stream = reinterpret_cast<cudaStream_t *>(extras[1]); auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]); int blockSize = 512; // max size of the thread blocks int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize)))); int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (sd::isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = sd::floorPow2(numElements); int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2); int np2LastBlock = 0; int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts int extraSpace = numEltsPerBlock / NUM_BANKS; int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); dim3 gridOnes(1, 1, 1); dim3 threadsOnes(numThreadsLastBlock, 1, 1); if (sharedMemSize < 2048) sharedMemSize = 2048; if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048; // execute the scan if (numBlocks > 1) { sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); sd::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { sd::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0); } else { sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0); } sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) 
failed"); } //////////////////////////////////////////////////////////////////////// void execReduce3All(Nd4jPointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo, void *extraParamsVals, OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo, OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo, OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape, Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets, Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) { try { InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension}); InteropDataBuffer::preparePrimaryUse({}, {dbDimension}); auto dimension = reinterpret_cast<int *>(dbDimension->primary()); int dimensionLength = static_cast<int>(shape::length(hDimensionShape)); LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); NativeOpExecutioner::execReduce3All(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParamsVals, dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), reinterpret_cast<int *>(dbDimension->special()), dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets); InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY}); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sort(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, bool descending) { try { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(xShapeInfo); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric, (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric, (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } sd::DebugHelper::checkErrorCode(stream, "sort(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto yLength = shape::length(yShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(xShapeInfo); auto yType = sd::ArrayOptions::dataType(yShapeInfo); if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return; if (xLength != yLength) throw std::runtime_error("sortByKey: keys and values must have the same size"); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto xLength = shape::length(xShapeInfo); auto yLength = shape::length(yShapeInfo); auto xEWS = shape::elementWiseStride(xShapeInfo); auto xType = sd::ArrayOptions::dataType(yShapeInfo); auto yType = sd::ArrayOptions::dataType(xShapeInfo); if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return; if (xLength != yLength) throw std::runtime_error("sortByValue: keys and values must have the same size"); // check if xLength is a power of 2, and use bitonic sort, if that's the case if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; dim3 launchDims(numBlocks, numThreads, 32768); for (int k = 2; k <= xLength; k = 2 * k) { for (int j = k >> 1; j > 0; j = j >> 1) { 
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending), LIBND4J_TYPES, LIBND4J_TYPES); } } } else { int numThreads = sd::math::nd4j_min<int>(512, xLength); int numBlocks = xLength / numThreads; if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++; numBlocks = sd::math::nd4j_min<int>(512, numBlocks); dim3 launchDims(numBlocks, numThreads, 32768); int max = 2, dg = 0; while (max < xLength) { max <<= 1; dg++; } max <<= 1; for (int window = 2; window < max; window <<= 1) { int n = window; int rev = 0; do { int half = n >> 1; BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending), LIBND4J_TYPES, LIBND4J_TYPES); n >>= 1; rev = 1; } while (n > 1); } } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByKey(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = sd::ArrayOptions::dataType(xShapeInfo); auto yType = sd::ArrayOptions::dataType(yShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTadByValue(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, void *y, Nd4jLong const* yShapeInfo, void *dy, Nd4jLong const* dyShapeInfo, int *dimension, int dimensionLength, bool descending) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048); auto xType = sd::ArrayOptions::dataType(yShapeInfo); auto yType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey, (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending), LIBND4J_TYPES, LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortTad(Nd4jPointer *extraPointers, void *x, Nd4jLong const* xShapeInfo, void *dX, Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets, bool descending) { try { // to be implemented auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]); auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength); dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768); auto xType = sd::ArrayOptions::dataType(xShapeInfo); BUILD_SINGLE_SELECTOR(xType, oesTadGeneric, (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending), LIBND4J_TYPES); sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, const Nd4jLong *xShapeInfo) { throw std::runtime_error("sortCooIndices:: Not implemented yet"); } void ravelMultiIndex(Nd4jPointer *extraPointers, Nd4jLong *indices, Nd4jLong *flatIndices, Nd4jLong length, Nd4jLong *shapeInfo, int mode) { throw std::runtime_error("ravelMultiIndex:: Not implemented yet"); } void unravelIndex(Nd4jPointer *extraPointers, Nd4jLong *indices, Nd4jLong *flatIndices, Nd4jLong length, Nd4jLong *shapeInfo) { throw std::runtime_error("unravelIndex:: Not implemented yet"); } Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) { return nullptr; } void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) { } sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) { try { return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) { return ptr->size(); } Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) { return ptr->pointer(); } const char* getAllCustomOps() { return sd::ops::OpRegistrator::getInstance().getAllCustomOperations(); } sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) { sd::graph::VariableSpace varSpace; Context block(2, &varSpace); sd::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]); for (int e = 0; e < numDArgs; e++) block.getDArguments()->push_back((sd::DataType) dArgs[e]); for (int e = 0; e < numInputShapes; e++) { auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]); // we 
shouldn't copy buffer if that's empty array void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e]; void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes]; auto array = new sd::NDArray(buffer_, bufferD_, shape_); // block should contain references to proper variable varSpace.putVariable(1, e, array); block.pickInput(1, e); inShapes.push_back(shape_); } auto shapeList = op->calculateOutputShape(&inShapes, block); if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach(); return shapeList; } sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { Context block(1); sd::ShapeList inShapes; for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]); for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]); for (int e = 0; e < numInputShapes; e++) inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e])); auto shapeList = op->calculateOutputShape(&inShapes, block); return shapeList; } sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getShapeListSize(sd::ShapeList* list) { return list->size(); } Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) { return list->at(i); } static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { if (op == nullptr) nd4j_printf("Can't find requested operation: [%lld]\n", hash); // we're using the same fake nodeId everywhere here std::vector<sd::NDArray*> inputs(numInputs); std::vector<sd::NDArray*> outputs(numOutputs); std::vector<double> ttArgs(numTArgs); std::vector<bool> bbArgs(numBArgs); std::vector<Nd4jLong> iiArgs(numIArgs); // filling block now with inputs for (int e = 0; e < numInputs; e++) { auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]); void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? 
nullptr : inputBuffers[e]; void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs]; inputs[e] = new sd::NDArray(buffer, bufferD, shape); } // if not inplace - transferring output arrays if (!isInplace) for (int e = 0; e < numOutputs; e++) { // we want to keep original output shape intact auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e])); void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e]; void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs]; // FIXME: revisit this. bool canNullify = true; for (int i = 0; i < numInputs; i++) { void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i]; if (ibuffer == buffer) { canNullify = false; break; } } if (canNullify && buffer != nullptr) memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape))); auto array = new sd::NDArray(buffer, bufferD, shape); outputs[e] = array; } for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e]; for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e]; for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e]; // hypothetically at this point we have everything filled auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace); //auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace); if (!isInplace) for (int e = 0; e < numOutputs; e++) { //shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]); //shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo()); //outputs[e]->printIndexedBuffer("C++ raw output"); //outputs[e]->printBuffer("C++ indexed output"); if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))) outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e]))); } for (auto v: inputs) delete v; for (auto v: outputs) delete v; return Status::OK(); } int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) { try { auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash); auto context = reinterpret_cast<Context *>(opContext); auto result = op->execute(context); auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream()); if (res != 0) throw sd::cuda_exception::build("customOp execution failed", res); for (auto v:context->fastpath_in()) { if (!v->isEmpty()) v->syncToDevice(); } for (auto v:context->fastpath_out()) { if (!v->isEmpty()) v->syncToDevice(); } return result; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) { try { auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer); sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph); return ND4J_STATUS_OK; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId); auto varSpace = graph->getVariableSpace()->clone(); std::vector<sd::NDArray*> handles; for (int e = 0; e < numInputs; e++) { auto idx = inputIndices[e]; // we'll delete this array later, together with cloned VariableSpace auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e])); handles.emplace_back(array); if (varSpace->hasVariable(idx)) { auto var = varSpace->getVariable(idx); if (var->hasNDArray()) delete var->getNDArray(); var->setNDArray(array); } else varSpace->putVariable(idx, array); } auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace); auto varSet = new sd::graph::VariablesSet(dZ); if (dZ == ND4J_STATUS_OK) { // pull back results, and provide them auto outputs = graph->fetchOutputs(); for (int e = 0; e < outputs->size(); e++) { // we're only getting variable ID/Index from original grap. values will be taken from cloned workspace std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index()); auto var = varSpace->getVariable(varId); varSet->push_back(var->clone()); } delete outputs; } delete varSpace; return varSet; } VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) { try { return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) { return set->size(); } Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) { return set->status(); } sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) { return set->at(i); } int getVariableId(sd::graph::Variable* variable) { return variable->id(); } int getVariableIndex(sd::graph::Variable* variable) { return variable->index(); } const char* getVariableName(sd::graph::Variable* variable) { return variable->getName()->c_str(); } Nd4jLong const* getVariableShape(sd::graph::Variable* variable) { return variable->getNDArray()->shapeInfo(); } void* getVariableBuffer(sd::graph::Variable* variable) { return variable->getNDArray()->buffer(); } int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) { try { sd::graph::GraphHolder::getInstance().dropGraphAny(graphId); return ND4J_STATUS_OK; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deletePointerArray(Nd4jPointer pointer) { 
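// releases a Nd4jPointer array that is expected to have been allocated with
// new[]; the delete*Array helpers below follow the same pattern for other
// element types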
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer); delete[] ptr; } void deleteCharArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<char *>(pointer); delete[] ptr; } void deleteIntArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<int *>(pointer); delete[] ptr; } void deleteLongArray(Nd4jPointer pointer) { auto ptr = reinterpret_cast<Nd4jLong *>(pointer); delete[] ptr; } void deleteVariablesSet(sd::graph::VariablesSet* pointer) { delete pointer; } void deleteShapeList(Nd4jPointer shapeList) { sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList); //list->destroy(); delete list; } const char* getAllOperations() { return sd::OpTracker::getInstance().exportOperations(); } Nd4jPointer getGraphState(Nd4jLong id) { return (Nd4jPointer) new sd::graph::GraphState(id); } void deleteGraphState(Nd4jPointer state) { auto stateP = reinterpret_cast<sd::graph::GraphState*>(state); delete stateP; } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { /** * That's basically exec, with VariableSpace provided in GraphState: * depending on operation (i.e. while of if), different logic executors could be used */ auto graph = state->graph(); auto varSpace = state->variableSpace(); // Node is dynamically created, and has nothing beyond it: only inputs and outputs // this node has id of 0, and inputs are Node node(OpType_LOGIC, opHash, 0); // mapping inputs for (int e = 0; e < numInputs; e++) { auto buffer = inputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]); auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace varSpace->putVariable(0, e, array); node.pickInput(0, e); } // mapping scopes for (int e = 0; e < numScopes; e++) { // we should check scope existence in GraphState/Graph int scopeId = (int) scopes[e]; if (!state->hasScope(scopeId)) { // nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId); return Status::THROW(); } node.pickInput(scopeId, 0); } auto dZ = LogicExecutor::processNode(graph, &node); if (dZ != Status::OK()) return dZ; // mapping outputs for (int e = 0; e < numOutputs; e++) { auto buffer = outputBuffers[e]; auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]); NDArray array(buffer, shapeInfo, varSpace->launchContext()); // now we just put array to VarSpace to the same ID //varSpace->putVariable(0, e, array); auto t = varSpace->getVariable(0, e)->getNDArray(); array.assign(t); } // removing input variables for (int e = 0; e < numInputs; e++) { varSpace->dropVariable(0, e); } // after some bla-bla-bla we should have Graph and Node for current op return Status::OK(); } Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) { try { return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes, numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes, numOutputs); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); 
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return 1; } } void deleteResultWrapper(Nd4jPointer ptr) { // just 0 room for compiler s@!t auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr); delete p; } int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) { throw std::runtime_error("estimateThreshold: Not implemented yet"); } /* * TypeDef: * void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ); */ void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) { try { auto dx = reinterpret_cast<void *>(dX); auto dz = reinterpret_cast<void *>(dZ); if (srcType == ND4J_FLOAT8) { if (dstType == ND4J_FLOAT8) { // convertKernel<double, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { //sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { //sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { //sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT8) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { //convertKernel<sd::int8, sd::int8>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: eventually we might want to add it } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_UINT8) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz); } else if (dstType == 
ND4J_FLOAT24) { // TODO: still might want to add } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT16) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO: .... ^^^ } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<float16>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_INT16) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { // TODO... 
} else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz); } else { printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_FLOAT24) { } else if (srcType == ND4J_FLOAT32) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_DOUBLE) { sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz); } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<float>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_DOUBLE) { if (dstType == ND4J_FLOAT8) { //sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz); } else if (dstType == ND4J_INT8) { sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT8) { sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT16) { sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz); } else if (dstType == ND4J_INT16) { sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz); } else if (dstType == ND4J_UINT16) { sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz); } else if (dstType == ND4J_FLOAT24) { } else if (dstType == ND4J_FLOAT32) { sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { // } else if (dstType == ND4J_THRESHOLD) { //sd::convertToThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else if (srcType == ND4J_THRESHOLD) { if (dstType == ND4J_FLOAT16) { //sd::convertFromThreshold<float16>(nullptr, dx, N, dz); } else if (dstType == ND4J_FLOAT32) { //sd::convertFromThreshold<float>(nullptr, dx, N, dz); } else if (dstType == ND4J_DOUBLE) { //sd::convertFromThreshold<double>(nullptr, dx, N, dz); } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } else { nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType); } } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) { auto u = new sd::utf8string(string, length); return reinterpret_cast<Nd4jPointer>(u); } Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<sd::utf8string*>(ptr)->_length; } char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) { return reinterpret_cast<sd::utf8string*>(ptr)->_buffer; } void deleteUtf8String(Nd4jPointer *extraPointers, 
Nd4jPointer ptr) { delete(reinterpret_cast<sd::utf8string*>(ptr)); } /////////////////////////////////////////////////////////////////// template<typename T, typename I> __global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* vindexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; auto indexes = reinterpret_cast<const I*>(vindexes); for (int e = 0; e < numOfSubArrs; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const auto yOffset = shape::getIndexOffset(i, yShapeInfo); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T, typename I> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong const* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) { scatterUpdateCuda<T, I><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs, void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets, void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets, void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets, void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets, void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) { try { auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]); auto type = ArrayOptions::dataType(hXShapeInfo); auto iType = ArrayOptions::dataType(hIndicesShapeInfo); BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher, (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes), LIBND4J_TYPES, INDEXING_TYPES); sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) 
failed"); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) { try { LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]); auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo); NDArray array(buffer, specialBuffer, shapeInfo, &lc); sd::DebugHelper::retrieveDebugStatistics(p, &array); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } void __global__ tryPointerKernel(void* p, int len) { auto buf = reinterpret_cast<int8_t*>(p); auto tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int b; if (tid < len) atomicAdd(&b, buf[tid]); __syncthreads(); if (threadIdx.x ==0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b); } void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) { try { cudaStream_t stream; cudaStreamCreate(&stream); tryPointerKernel <<< 256, 512, len + 64, stream>>> (p, len); auto e = cudaStreamSynchronize(stream); if (e != 0) throw sd::cuda_exception::build("tryPointer failed", e); cudaStreamDestroy(stream); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } int dataTypeFromNpyHeader(void *header) { return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); } OpaqueConstantShapeBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) { return shapeBufferEx(rank, shape, strides, dtype, order, ews, empty ? ARRAY_EMPTY : 0); } OpaqueConstantShapeBuffer* shapeBufferEx(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, Nd4jLong extras) { try { auto buffer = new ConstantShapeBuffer(); *buffer = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo( ShapeDescriptor(dtype, order, shape, strides, rank, ews, extras)); return buffer; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer* ptr) { delete ptr; } void deleteConstantDataBuffer(OpaqueConstantDataBuffer* ptr) { delete ptr; } void deleteTadPack(sd::TadPack* ptr) { delete ptr; } bool isBlasVersionMatches(int major, int minor, int build) { auto result = major == Environment::getInstance()._blasMajorVersion && minor == Environment::getInstance()._blasMinorVersion && build == Environment::getInstance()._blasPatchVersion; if (!result) { nd4j_printf("CUDA/cuBLAS version mismatch. 
Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion, Environment::getInstance()._blasPatchVersion, major, minor, build); sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch"); } return result; } sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) { return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype); } sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) { return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype); } sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) { return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype); } Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) { return dbf->primary(); } Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) { return dbf->special(); } Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) { return dbf->length(); } Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) { return dbf->sizeOf(); } Nd4jPointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer* dbf) { return const_cast<Nd4jLong*>(dbf->primary()); } Nd4jPointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer* dbf) { return const_cast<Nd4jLong*>(dbf->special()); } sd::graph::Context* createGraphContext(int nodeId) { return new sd::graph::Context(nodeId); } sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) { return &ptr->randomGenerator(); } void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) { ptr->markInplace(reallyInplace); } void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) { ptr->setCudaContext(stream, reductionPointer, allocationPointer); } void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo); } void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) { ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo); } void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) { ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo); } void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) { ptr->setTArguments(arguments, numberOfArguments); } void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) { ptr->setIArguments(arguments, numberOfArguments); } void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) { ptr->setBArguments(arguments, numberOfArguments); } void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) { std::vector<sd::DataType> 
dtypes(numberOfArguments); for (int e = 0; e < numberOfArguments; e++) dtypes[e] = (sd::DataType) arguments[e]; ptr->setDArguments(dtypes); } void deleteGraphContext(sd::graph::Context* ptr) { delete ptr; } sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) { try { return new sd::graph::RandomGenerator(rootSeed, nodeSeed); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) { return ptr->rootState(); } Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) { return ptr->nodeState(); } void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) { ptr->setStates(rootSeed, nodeSeed); } float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeT<float>(index); } double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeT<double>(index); } int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeInt(index); } Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) { return ptr->relativeLong(index); } void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) { delete ptr; } Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) { try { cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray)); unsigned int shapeSize = arr.shape.size(); std::vector<Nd4jLong> shape(shapeSize); bool _empty = false; for (unsigned int i = 0; i < shapeSize; i++) { shape[i] = arr.shape[i]; if (arr.shape[i] == 0) _empty = true; } auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray)); Nd4jLong *shapeBuffer; if (shape.size() == 1 && shape[0] == 0) { // scalar case shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype); } else if (_empty) { if (shapeSize > 0) shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape); else shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype); } else { shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 
'f' : 'c', shape); } return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(shapeBuffer, true)); // TO DO: this can lead to unpleasant crash sometimes } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runLightBenchmarkSuit(bool printOut) { try { sd::LightBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } const char* runFullBenchmarkSuit(bool printOut) { try { sd::FullBenchmarkSuit suit; auto result = suit.runSuit(); if (printOut) nd4j_printf("%s\n", result.data()); auto chars = new char[result.length() + 1]; std::memcpy(chars, result.data(), result.length()); chars[result.length()] = (char) 0x0; return chars; } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jLong getCachedMemory(int deviceId) { return sd::ConstantHelper::getInstance().getCachedAmount(deviceId); } sd::LaunchContext* defaultLaunchContext() { return LaunchContext::defaultContext(); } Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) { return lc->getScalarPointer(); } Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) { return lc->getReductionPointer(); } Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) { return lc->getAllocationPointer(); } Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) { return lc->getCudaStream(); } Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) { return lc->getCudaSpecialStream(); } Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) { return lc->getCublasHandle(); } Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) { return lc->getCusolverHandle(); } int lastErrorCode() { return sd::LaunchContext::defaultContext()->errorReference()->errorCode(); } const char* lastErrorMessage() { return sd::LaunchContext::defaultContext()->errorReference()->errorMessage(); } void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) { ptr->setShapeFunctionOverride(reallyOverride); } void ctxPurge(OpaqueContext* ptr) { ptr->clearFastPath(); } int binaryLevel() { return 0; } int optimalLevel() { return 0; } bool isMinimalRequirementsMet() { return true; } bool isOptimalRequirementsMet() { return true; } void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); } void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) { if (execMode < 0 || execMode > 2) execMode = 0; ptr->setExecutionMode((samediff::ExecutionMode) execMode); } OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) { auto buffer = dbAllocateDataBuffer(0, dataType, false); if (primary != nullptr) buffer->setPrimary(primary, elements); if (special != nullptr) buffer->setSpecial(special, elements); return buffer; } OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) { return allocateDataBuffer(elements, dataType, allocateBoth); } OpaqueDataBuffer* 
allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) { try { auto dtype = DataTypeUtils::fromInt(dataType); return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); return nullptr; } } Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->primary(); } Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->special(); } void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) { delete dataBuffer; } void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) { dataBuffer->setPrimary(primaryBuffer, numBytes); } void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) { dataBuffer->setSpecial(specialBuffer, numBytes); } void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocatePrimary(); } void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocateSpecial(); } void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) { try { dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType())); } catch (std::exception &e) { sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1); sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what()); } } OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) { return new InteropDataBuffer(*dataBuffer, length, offset); } void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToSpecial(); } void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToPrimary(nullptr); } void dbTickHostRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readPrimary(); } void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writePrimary(); } void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readSpecial(); } void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writeSpecial(); } void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) { dataBuffer->expand(elements); } void dbClose(OpaqueDataBuffer *dataBuffer) { dataBuffer->getDataBuffer()->close(); } int dbDeviceId(OpaqueDataBuffer *dataBuffer) { return dataBuffer->deviceId(); } void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) { dataBuffer->setDeviceId(deviceId); } int dbLocality(OpaqueDataBuffer *dataBuffer) { auto p = dataBuffer->dataBuffer()->isPrimaryActual(); auto d = dataBuffer->dataBuffer()->isSpecialActual(); if (p && d) return 0; else if (p) return -1; else return 1; }
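// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the file above: scatterUpdateCuda (defined earlier in
// this file) dispatches on an opCode and walks shape/offset metadata; the reduced kernel
// below keeps only the "owner block" rule and the add case, assuming plain float data in
// contiguous, equally sized sub-arrays. All names here are invented for the sketch.
__global__ void scatterAddSketch(float* x, const float* y, const int* indexes,
                                 int numOfSubArrs, int subArrLen) {
    for (int e = 0; e < numOfSubArrs; e++) {
        const int xIndex = indexes[e];
        // exactly one block "owns" each target sub-array, so repeated updates to the
        // same target are serialized inside that block rather than racing across blocks
        const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex
                                                : blockIdx.x == xIndex % gridDim.x;
        if (!isOwner) continue;
        float* xs       = x + (size_t)xIndex * subArrLen;
        const float* ys = y + (size_t)e * subArrLen;
        for (int i = threadIdx.x; i < subArrLen; i += blockDim.x)
            xs[i] += ys[i];
        __syncthreads();
    }
}
// ----------------------------------------------------------------------------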
aba0ccd3ce1174c678938f17633a474a9c3a6e73.hip
// !!! This is a file automatically generated by hipify!!! /* neurogpu++.cu Interface for the neurogpu++ program, that implements MLP Neural Networks in CUDA. Francisco M. Magalhaes Neto, 2014-05-28 Based on neurogpu, by Andrei de A. Formiga, 2012-05-21 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mlpnnets.h" #include "stopwatch.h" #define SEED 631814ULL #define MAX_ABS 1.2f DataSet* readDataset(char *filename, int inputSize, int outputSize, bool isFunction) { FILE *f; int i; char buffer[500]; char *value; float outputValue; float *outputs; DataSet *dset; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", filename); return NULL; } // count lines in file to allocate dataset arrays i = 0; while (fgets(buffer, 500, f) != NULL) ++i; if (!feof(f) || ferror(f)) { fprintf(stderr, "IO error while reading from file\n"); fclose(f); return NULL; } fseek(f, 0, SEEK_SET); dset = CreateDataSet(i, inputSize, outputSize); if (dset == NULL) { fprintf(stderr, "Error creating dataset\n"); return NULL; } int iix = 0, oix = 0; outputs = (float*) malloc(sizeof(float) * outputSize); while(fgets(buffer, 500, f) != NULL){ value = strtok(buffer, ","); for (int i = 0; i < inputSize; ++i) { dset->inputs[iix++] = atof(value); value = strtok(NULL, ","); } outputValue = atof(value); if (outputSize > 1) for (int i = 0; i < outputSize; ++i) dset->outputs[oix++] = (i == outputValue) ? 0.9f : 0.1f; else if (!isFunction) dset->outputs[oix++] = (outputValue == 1) ? 0.9f : 0.1f; else dset->outputs[oix++] = (outputValue + 1.0f) / 2.0f; } free(outputs); fclose(f); return dset; } void print_dataset(DataSet *dset) { int i, j; printf("Number of cases: %d\n", dset->nCases); for (i = 0; i < dset->nCases; ++i) { for (j = 0; j < dset->inputSize; ++j) printf("%3.2f ", dset->inputs[i*dset->inputSize+j]); printf(" | "); for (j = 0; j < dset->outputSize; ++j) printf("%3.2f ", dset->outputs[i*dset->outputSize+j]); printf("\n"); } } int outputToClass(float *output, int outputSize) { int classNumber = 0; if (outputSize == 1) { if (output[0] < 0.5f) return 0; return 1; } for (int i = 0; i < outputSize; ++i) { if (output[i] > output[classNumber]) classNumber = i; } return classNumber; } void print_network_data(MLPNetwork *net) { printf("nLayers = %d, d_weights = %lu, nWeights = %d, nCases = %d\n", net->nLayers, (unsigned long) net->d_weights, net->nWeights, net->nCases); printf("output ptr for first layer: %lu\n", (unsigned long) net->layers[0]->d_outs); printf("output ptr for last layer: %lu\n", (unsigned long) net->layers[net->nLayers-1]->d_outs); } struct TestDescription { int inputSize; int outputSize; int epochs; float learningRate; bool isFunction; int nLayers; int *neuronsPerLayer; }; TestDescription *readDescription(const char *name) { FILE *f; TestDescription *desc; char buf[50]; char *layer; int isFunction; f = fopen(name, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", name); return NULL; } desc = (TestDescription*) malloc(sizeof(TestDescription)); fscanf(f, "%d\n", &desc->inputSize); fscanf(f, "%d\n", &desc->outputSize); fscanf(f, "%d\n", &desc->epochs); fscanf(f, "%f\n", &desc->learningRate); fscanf(f, "%d\n", &isFunction); desc->isFunction = isFunction; fscanf(f, "%d\n", &desc->nLayers); desc->neuronsPerLayer = (int*) malloc(sizeof(int) * desc->nLayers); fgets(buf, 50, f); layer = strtok(buf, ","); for (int i = 0; i < desc->nLayers; ++i) { desc->neuronsPerLayer[i] = atoi(layer); layer = strtok(NULL, ","); } return desc; } void 
destroyDescription(TestDescription *desc) { free(desc->neuronsPerLayer); free(desc); } int runTest(const char *name, int casesPerBlock, int neuronsPerThread) { int i; int errors; DataSet *train_set; DataSet *test_set; TestDescription *desc; float e; double acc; StopWatch timer; double elapsedTime; char desc_name[FILENAME_MAX]; char train_name[FILENAME_MAX]; char test_name[FILENAME_MAX]; MLPNetwork *nn; sprintf(desc_name, "data/%s.desc", name); sprintf(train_name, "data/%s.train", name); sprintf(test_name, "data/%s.test", name); desc = readDescription(desc_name); // training train_set = readDataset(train_name, desc->inputSize, desc->outputSize, desc->isFunction); if (train_set == NULL) { fprintf(stderr, "Error reading training set\n"); exit(1); } nn = CreateNetwork(desc->nLayers, desc->neuronsPerLayer); RandomWeights(nn, MAX_ABS, SEED); printf("Training network with %d epochs...\n", desc->epochs); StartTimer(&timer); e = BatchTrainBackprop(nn, train_set, desc->epochs, desc->learningRate, false, false, ACTF_SIGMOID, casesPerBlock, neuronsPerThread); StopTimer(&timer); elapsedTime = GetElapsedTime(&timer); printf("Training finished, approximate final MSE: %f\n", e/nn->nCases); printf("Weights after training:\n"); PrintWeights(nn); printf("-----------------------------------------\n"); // free the training dataset hipDeviceSynchronize(); DestroyDataSet(train_set); // testing test_set = readDataset(test_name, desc->inputSize, desc->outputSize, desc->isFunction); if (test_set == NULL) { fprintf(stderr, "Error reading test set\n"); return -1; } errors = 0; if (!PrepareForTesting(nn, test_set->nCases)) { fprintf(stderr, "Error preparing network for testing\n"); return -1; } printf("Testing with %d cases...\n", test_set->nCases); PresentInputsFromDataSet(nn, test_set, ACTF_SIGMOID, 1, 1); hipDeviceSynchronize(); printf("Weights again:\n"); PrintWeights(nn); float *output = (float*) malloc(sizeof(float) * test_set->nCases * test_set->outputSize); if (output == NULL) { fprintf(stderr, "Could not allocate memory for copying output to host\n"); return -1; } if (!CopyNetworkOutputs(nn, output)) { fprintf(stderr, "Could not get device outputs\n"); return -1; } if (!desc->isFunction) { int predicted, desired; for (i = 0; i < test_set->nCases; ++i) { predicted = outputToClass(output + (i * desc->outputSize), desc->outputSize); desired = outputToClass(test_set->outputs + (i * desc->outputSize), desc->outputSize); if (predicted != desired) ++errors; printf("Case %d | predicted: %d, desired: %d\n", i, predicted, desired); } acc = 100.0 - (100.0 * errors / test_set->nCases); printf("Testing accuracy: %f\n", acc); printf("Total classification errors: %d\n", errors); } else { float predicted, desired; double error = 0.0f; for (i = 0; i < test_set->nCases; ++i) { predicted = output[i]; desired = test_set->outputs[i]; error += fabs(predicted - desired); printf("Input %f | predicted: %f, desired: %f\n", test_set->inputs[i], predicted, desired); } error /= test_set->nCases; printf("Mean error: %f\n", error); } free(output); DestroyNetwork(nn); DestroyDataSet(test_set); destroyDescription(desc); fprintf(stderr, "Training time: %lf seconds\n", elapsedTime); return 0; } int main(int argc, char **argv) { int casesPerBlock = 1; int neuronsPerThread = 1; char name[FILENAME_MAX] = "cancer"; if (argc > 1) { strcpy(name, argv[1]); if (argc == 4) { casesPerBlock = atoi(argv[2]); neuronsPerThread = atoi(argv[3]); } else if (argc != 2) { fprintf(stderr, "Syntax error\n"); exit(EXIT_FAILURE); } } if ((casesPerBlock > 0) && 
(neuronsPerThread > 0)) return runTest(name, casesPerBlock, neuronsPerThread); fprintf(stderr, "Syntax error\n"); return EXIT_FAILURE; }
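// ----------------------------------------------------------------------------
// Side note, written as a sketch rather than part of the file above: this hipified source
// differs from the CUDA original that follows essentially only in the hipify banner and in
// hipDeviceSynchronize() replacing the long-deprecated cudaThreadSynchronize(). A tiny
// shim like this is one way to keep a single host source for both runtimes; the macro
// name DEVICE_SYNC is an invented example.
#ifdef __HIPCC__
  #include <hip/hip_runtime.h>
  #define DEVICE_SYNC() hipDeviceSynchronize()
#else
  #include <cuda_runtime.h>
  #define DEVICE_SYNC() cudaDeviceSynchronize()
#endif
// ----------------------------------------------------------------------------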
aba0ccd3ce1174c678938f17633a474a9c3a6e73.cu
/* neurogpu++.cu Interface for the neurogpu++ program, that implements MLP Neural Networks in CUDA. Francisco M. Magalhaes Neto, 2014-05-28 Based on neurogpu, by Andrei de A. Formiga, 2012-05-21 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mlpnnets.h" #include "stopwatch.h" #define SEED 631814ULL #define MAX_ABS 1.2f DataSet* readDataset(char *filename, int inputSize, int outputSize, bool isFunction) { FILE *f; int i; char buffer[500]; char *value; float outputValue; float *outputs; DataSet *dset; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", filename); return NULL; } // count lines in file to allocate dataset arrays i = 0; while (fgets(buffer, 500, f) != NULL) ++i; if (!feof(f) || ferror(f)) { fprintf(stderr, "IO error while reading from file\n"); fclose(f); return NULL; } fseek(f, 0, SEEK_SET); dset = CreateDataSet(i, inputSize, outputSize); if (dset == NULL) { fprintf(stderr, "Error creating dataset\n"); return NULL; } int iix = 0, oix = 0; outputs = (float*) malloc(sizeof(float) * outputSize); while(fgets(buffer, 500, f) != NULL){ value = strtok(buffer, ","); for (int i = 0; i < inputSize; ++i) { dset->inputs[iix++] = atof(value); value = strtok(NULL, ","); } outputValue = atof(value); if (outputSize > 1) for (int i = 0; i < outputSize; ++i) dset->outputs[oix++] = (i == outputValue) ? 0.9f : 0.1f; else if (!isFunction) dset->outputs[oix++] = (outputValue == 1) ? 0.9f : 0.1f; else dset->outputs[oix++] = (outputValue + 1.0f) / 2.0f; } free(outputs); fclose(f); return dset; } void print_dataset(DataSet *dset) { int i, j; printf("Number of cases: %d\n", dset->nCases); for (i = 0; i < dset->nCases; ++i) { for (j = 0; j < dset->inputSize; ++j) printf("%3.2f ", dset->inputs[i*dset->inputSize+j]); printf(" | "); for (j = 0; j < dset->outputSize; ++j) printf("%3.2f ", dset->outputs[i*dset->outputSize+j]); printf("\n"); } } int outputToClass(float *output, int outputSize) { int classNumber = 0; if (outputSize == 1) { if (output[0] < 0.5f) return 0; return 1; } for (int i = 0; i < outputSize; ++i) { if (output[i] > output[classNumber]) classNumber = i; } return classNumber; } void print_network_data(MLPNetwork *net) { printf("nLayers = %d, d_weights = %lu, nWeights = %d, nCases = %d\n", net->nLayers, (unsigned long) net->d_weights, net->nWeights, net->nCases); printf("output ptr for first layer: %lu\n", (unsigned long) net->layers[0]->d_outs); printf("output ptr for last layer: %lu\n", (unsigned long) net->layers[net->nLayers-1]->d_outs); } struct TestDescription { int inputSize; int outputSize; int epochs; float learningRate; bool isFunction; int nLayers; int *neuronsPerLayer; }; TestDescription *readDescription(const char *name) { FILE *f; TestDescription *desc; char buf[50]; char *layer; int isFunction; f = fopen(name, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", name); return NULL; } desc = (TestDescription*) malloc(sizeof(TestDescription)); fscanf(f, "%d\n", &desc->inputSize); fscanf(f, "%d\n", &desc->outputSize); fscanf(f, "%d\n", &desc->epochs); fscanf(f, "%f\n", &desc->learningRate); fscanf(f, "%d\n", &isFunction); desc->isFunction = isFunction; fscanf(f, "%d\n", &desc->nLayers); desc->neuronsPerLayer = (int*) malloc(sizeof(int) * desc->nLayers); fgets(buf, 50, f); layer = strtok(buf, ","); for (int i = 0; i < desc->nLayers; ++i) { desc->neuronsPerLayer[i] = atoi(layer); layer = strtok(NULL, ","); } return desc; } void destroyDescription(TestDescription *desc) { free(desc->neuronsPerLayer); free(desc); } int 
runTest(const char *name, int casesPerBlock, int neuronsPerThread) { int i; int errors; DataSet *train_set; DataSet *test_set; TestDescription *desc; float e; double acc; StopWatch timer; double elapsedTime; char desc_name[FILENAME_MAX]; char train_name[FILENAME_MAX]; char test_name[FILENAME_MAX]; MLPNetwork *nn; sprintf(desc_name, "data/%s.desc", name); sprintf(train_name, "data/%s.train", name); sprintf(test_name, "data/%s.test", name); desc = readDescription(desc_name); // training train_set = readDataset(train_name, desc->inputSize, desc->outputSize, desc->isFunction); if (train_set == NULL) { fprintf(stderr, "Error reading training set\n"); exit(1); } nn = CreateNetwork(desc->nLayers, desc->neuronsPerLayer); RandomWeights(nn, MAX_ABS, SEED); printf("Training network with %d epochs...\n", desc->epochs); StartTimer(&timer); e = BatchTrainBackprop(nn, train_set, desc->epochs, desc->learningRate, false, false, ACTF_SIGMOID, casesPerBlock, neuronsPerThread); StopTimer(&timer); elapsedTime = GetElapsedTime(&timer); printf("Training finished, approximate final MSE: %f\n", e/nn->nCases); printf("Weights after training:\n"); PrintWeights(nn); printf("-----------------------------------------\n"); // free the training dataset cudaThreadSynchronize(); DestroyDataSet(train_set); // testing test_set = readDataset(test_name, desc->inputSize, desc->outputSize, desc->isFunction); if (test_set == NULL) { fprintf(stderr, "Error reading test set\n"); return -1; } errors = 0; if (!PrepareForTesting(nn, test_set->nCases)) { fprintf(stderr, "Error preparing network for testing\n"); return -1; } printf("Testing with %d cases...\n", test_set->nCases); PresentInputsFromDataSet(nn, test_set, ACTF_SIGMOID, 1, 1); cudaThreadSynchronize(); printf("Weights again:\n"); PrintWeights(nn); float *output = (float*) malloc(sizeof(float) * test_set->nCases * test_set->outputSize); if (output == NULL) { fprintf(stderr, "Could not allocate memory for copying output to host\n"); return -1; } if (!CopyNetworkOutputs(nn, output)) { fprintf(stderr, "Could not get device outputs\n"); return -1; } if (!desc->isFunction) { int predicted, desired; for (i = 0; i < test_set->nCases; ++i) { predicted = outputToClass(output + (i * desc->outputSize), desc->outputSize); desired = outputToClass(test_set->outputs + (i * desc->outputSize), desc->outputSize); if (predicted != desired) ++errors; printf("Case %d | predicted: %d, desired: %d\n", i, predicted, desired); } acc = 100.0 - (100.0 * errors / test_set->nCases); printf("Testing accuracy: %f\n", acc); printf("Total classification errors: %d\n", errors); } else { float predicted, desired; double error = 0.0f; for (i = 0; i < test_set->nCases; ++i) { predicted = output[i]; desired = test_set->outputs[i]; error += fabs(predicted - desired); printf("Input %f | predicted: %f, desired: %f\n", test_set->inputs[i], predicted, desired); } error /= test_set->nCases; printf("Mean error: %f\n", error); } free(output); DestroyNetwork(nn); DestroyDataSet(test_set); destroyDescription(desc); fprintf(stderr, "Training time: %lf seconds\n", elapsedTime); return 0; } int main(int argc, char **argv) { int casesPerBlock = 1; int neuronsPerThread = 1; char name[FILENAME_MAX] = "cancer"; if (argc > 1) { strcpy(name, argv[1]); if (argc == 4) { casesPerBlock = atoi(argv[2]); neuronsPerThread = atoi(argv[3]); } else if (argc != 2) { fprintf(stderr, "Syntax error\n"); exit(EXIT_FAILURE); } } if ((casesPerBlock > 0) && (neuronsPerThread > 0)) return runTest(name, casesPerBlock, neuronsPerThread); fprintf(stderr, 
"Syntax error\n"); return EXIT_FAILURE; }
daef0c538980a5fb5ef6c5bb2d3dd9c2ff380415.hip
// !!! This is a file automatically generated by hipify!!! /* #ifndef __HIPCC__ #define __HIPCC__ #endif */ #include <iostream> #include <stdio.h> #include <cmath> #include <fstream> #include <string> #include <cstdlib> #include <iterator> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_cooperative_groups.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #define SUBSEQLENGTH 200 #define NUMTHREADS 1024 #define TSLENGTH 24125 // Lunghezza txt #define NUMOFSUBSEQ (TSLENGTH - SUBSEQLENGTH + 1) #define SHAREDLENGTH (NUMTHREADS + SUBSEQLENGTH - 1) //#define TSLENGTH 2299 // Lunghezza csv //#define TSLENGTH 586086 #define NUMBLOCKS (TSLENGTH + NUMTHREADS - 1) / NUMTHREADS // Calcolo del numero ottimale di blocchi __constant__ float primo_vettore_confronto[SUBSEQLENGTH]; // Si crea il vettore della memoria constant e lo si riempe con una parte // del vettore timeSeries lungo SUBSEQLENGTH partendo dall'indice i using namespace std; __inline__ __device__ void warpReduceMin(float& val, int& idx) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { float tmpVal = __shfl_down_sync(0xFFFFFFFF, val, offset, 32); int tmpIdx = __shfl_down_sync(0xFFFFFFFF, idx, offset, 32); if (tmpVal == val) { // Se due valori di distanza sono uguai si salva la posizione pi piccola if (tmpIdx < idx) { idx = tmpIdx; } } if (tmpVal < val) { val = tmpVal; idx = tmpIdx; } } } __inline__ __device__ void blockReduceMin(float& val, int& idx, int currentThreads, int indexFirstSubsequence) { static __shared__ float values[32], indices[32]; // Shared mem for 32 partial mins int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; if (lane == 0) { // Il primo thread di ogni warp inizializza un elemento della memoria shared values[wid] = FLT_MAX; // all'indice che corrisponde a quello del blocco di cui fa parte } warpReduceMin(val, idx); // Each warp performs partial reduction if (lane == 0) { values[wid] = val; // Write reduced value to shared memory indices[wid] = idx; // Write reduced value to shared memory } __syncthreads(); // Wait for all partial reductions if (threadIdx.x < (blockDim.x + warpSize - 1) / warpSize) { // Read from shared memory only if that warp existed val = values[lane]; // && threadIdx.x <= currentThreads / warpSize idx = indices[lane]; } else { val = FLT_MAX; idx = 0; } if (wid == 0) { warpReduceMin(val, idx); // Final reduce within first warp } } __global__ void sequencesDistance(int indexFirstSubsequence, float* dev_timeSeries, float* dev_blocksDistances, int* dev_blocksLocations) { extern __shared__ float cache[]; // La sua lunghezza NUMTHREADS + SUBSEQLENGTH - 1 float sum = 0; float distanza = FLT_MAX; // Variabile a cui assegnata la distanza. I thread non coinvolti nel calcolo hanno questo valore come default float diff; float power; int first_subseq_idx = indexFirstSubsequence; // Indice della prima sottosequenza che verr confrontata con tutte le altre int second_subseq_idx = blockIdx.x * blockDim.x + threadIdx.x; // Indice della seconda sottosequenza che si confronta con la prima. 
L'id globale di ogni thread stabilisce il punto di partenza int dev_ts_position = blockIdx.x * blockDim.x + threadIdx.x; int cache_idx = threadIdx.x; while (cache_idx < SHAREDLENGTH && dev_ts_position <= TSLENGTH) { // Ogni thread carica nella shared uno o pi elementi di timeSeries cache[cache_idx] = dev_timeSeries[dev_ts_position]; cache_idx = cache_idx + NUMTHREADS; dev_ts_position = dev_ts_position + NUMTHREADS; } __syncthreads(); // SYNC perch non tutti possono aver gi riempito la cache if (abs(second_subseq_idx - first_subseq_idx) >= SUBSEQLENGTH && second_subseq_idx < NUMOFSUBSEQ) { // Verifica di self-match e controllo che il secondo indice rientri nei valori consentiti for (int i = 0; i < SUBSEQLENGTH; i++) { diff = primo_vettore_confronto[i] - cache[threadIdx.x + i]; power = diff * diff; sum = sum + power; } distanza = sqrt(sum); // Da rimuovere nella versione finale, la radice si calcola alla fine } blockReduceMin(distanza, second_subseq_idx, NUMTHREADS, first_subseq_idx); if (threadIdx.x == 0) { dev_blocksDistances[blockIdx.x] = distanza; dev_blocksLocations[blockIdx.x] = second_subseq_idx; } } __global__ void finalReduction(int indexFirstSubsequence, int previousBlocks, int* dev_blocksLocations, float* dev_blocksDistances, int* dev_finalLocations, float* dev_finalDistances) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float val = FLT_MAX; int idx = 0; if (tid < previousBlocks) { val = dev_blocksDistances[tid]; idx = dev_blocksLocations[tid]; } blockReduceMin(val, idx, previousBlocks, indexFirstSubsequence); if (threadIdx.x == 0 && gridDim.x != 1) { // Si utilizzano i vettori dev_blocksDistances e dev_blocksLocations per salvare i risultati delle riduzioni dev_blocksDistances[blockIdx.x] = val; // ad ogni nuova iterazione finch si utilizza pi di un blocco dev_blocksLocations[blockIdx.x] = idx; } if (tid == 0 && gridDim.x == 1) { // Si scrive sui vettori finali solo quando la riduzione arrivata ad utilizzare un solo blocco dev_finalDistances[indexFirstSubsequence] = val; dev_finalLocations[indexFirstSubsequence] = idx; } } void compareSubsequences(float* dev_blocksDistances, int* dev_blocksLocations, float* dev_timeSeries, float* dev_finalDistances, int* dev_finalLocations) { int threads = 1024; for (int i = 0; i <= TSLENGTH - SUBSEQLENGTH; i++) { // Outer loop int previousBlocks = NUMBLOCKS; int currentBlocks = (NUMBLOCKS + threads - 1) / threads; bool continueReduction = true; hipMemcpyToSymbol(primo_vettore_confronto, &dev_timeSeries[i], SUBSEQLENGTH * sizeof(float), 0, hipMemcpyDeviceToDevice); // Copia nella constant la sottosequenza all'i-esima posizione // da confrontare con tutte le altre hipLaunchKernelGGL(( sequencesDistance), dim3(NUMBLOCKS), dim3(NUMTHREADS), SHAREDLENGTH * sizeof(float), 0, i, dev_timeSeries, dev_blocksDistances, dev_blocksLocations); // Kernel che esegue il calcolo delle distanze // ed una prima riduzione while (continueReduction) { // Si continua a ridurre i risultati finch il secondo kernel non ha un solo blocco hipLaunchKernelGGL(( finalReduction), dim3(currentBlocks), dim3(threads), 0, 0, i, previousBlocks, dev_blocksLocations, dev_blocksDistances, dev_finalLocations, dev_finalDistances); // Riduce i risultati ottenuti dal kernel precedente if (currentBlocks == 1) { continueReduction = false; } previousBlocks = currentBlocks; currentBlocks = (currentBlocks + threads - 1) / threads; // Calcolo del nuovo numero di blocchi da usare in finalReduction } } hipFree(primo_vettore_confronto); } //--------------------Lettura e scrittura file 
start-------------------------- void readFile(float* timeSeries, string fileName) { const char* c = fileName.c_str(); float num = 0; int i = 0; ifstream readFile; readFile.open(c); if (!readFile.is_open()) { // Check to see that the file was opened correctly cerr << "There was a problem opening the input file!\n"; exit(1); // Exit or do additional error checking } while (readFile >> num) { // Keep storing values from the text file so long as data exists timeSeries[i] = float(num); i++; } readFile.close(); } void scriviFile(float* distances, int* locations, string fileName) { FILE* fp; //char nomeFile; //nomeFile = "nnd.dat"; fp = fopen("nnd.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%lf \n", distances[i]); } fclose(fp); // nomeFile = "ngh"+fileName +".dat"; fp = fopen("loc.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%d \n ", locations[i] + 1); } fclose(fp); fp = fopen("total.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%lf \t %d \n", distances[i], locations[i] + 1); } fclose(fp); } //--------------------Lettura e scrittura file end----------------------------- int main() { //string fileName = "ecg0606_1.csv"; string fileName = "nprs44.txt"; //string fileName = "318_signal1.txt"; float* timeSeries; float* distances; int* locations; timeSeries = (float*)malloc(TSLENGTH * sizeof(float)); distances = (float*)malloc(NUMOFSUBSEQ * sizeof(float)); locations = (int*)malloc(NUMOFSUBSEQ * sizeof(int)); fill_n(distances, NUMOFSUBSEQ, FLT_MAX); cout << "File name: " << fileName << endl; cout << "File length: " << TSLENGTH << endl; cout << "Subsequence length: " << SUBSEQLENGTH << endl; cout << "Number of blocks: " << NUMBLOCKS << endl; cout << "Number of threads: " << NUMTHREADS << endl; cout << "Number of subsequences: " << NUMOFSUBSEQ << endl; cout << "Shared memory dimension: " << SHAREDLENGTH << endl; readFile(timeSeries, fileName); float* dev_blocksDistances; // Vettore sulla memoria global dove ogni blocco salva il risultato della distanza che calcola int* dev_blocksLocations; // Vettore sulla memoria global dove ogni blocco salva l'indice della propria migliore distanza trovata float* dev_finalDistances; int* dev_finalLocations; float* dev_timeSeries; hipMalloc((void**)&dev_blocksDistances, NUMBLOCKS * sizeof(float)); hipMalloc((void**)&dev_blocksLocations, NUMBLOCKS * sizeof(int)); hipMalloc((void**)&dev_timeSeries, TSLENGTH * sizeof(float)); hipMalloc((void**)&dev_finalDistances, NUMOFSUBSEQ * sizeof(float)); hipMalloc((void**)&dev_finalLocations, NUMOFSUBSEQ * sizeof(int)); hipMemcpy(dev_timeSeries, timeSeries, TSLENGTH * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_finalDistances, distances, NUMOFSUBSEQ * sizeof(float), hipMemcpyHostToDevice); compareSubsequences(dev_blocksDistances, dev_blocksLocations, dev_timeSeries, dev_finalDistances, dev_finalLocations); // Funzione che lancia il kernel hipMemcpy(distances, dev_finalDistances, NUMOFSUBSEQ * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(locations, dev_finalLocations, NUMOFSUBSEQ * sizeof(int), hipMemcpyDeviceToHost); scriviFile(distances, locations, fileName); free(timeSeries); free(distances); free(locations); hipFree(dev_blocksDistances); hipFree(dev_blocksLocations); hipFree(dev_timeSeries); hipFree(dev_finalDistances); hipFree(dev_finalLocations); return 0; }
daef0c538980a5fb5ef6c5bb2d3dd9c2ff380415.cu
/* #ifndef __CUDACC__ #define __CUDACC__ #endif */ #include <iostream> #include <stdio.h> #include <cmath> #include <fstream> #include <string> #include <cstdlib> #include <iterator> #include <cuda.h> #include <cuda_runtime_api.h> #include <cooperative_groups.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #define SUBSEQLENGTH 200 #define NUMTHREADS 1024 #define TSLENGTH 24125 // Lunghezza txt #define NUMOFSUBSEQ (TSLENGTH - SUBSEQLENGTH + 1) #define SHAREDLENGTH (NUMTHREADS + SUBSEQLENGTH - 1) //#define TSLENGTH 2299 // Lunghezza csv //#define TSLENGTH 586086 #define NUMBLOCKS (TSLENGTH + NUMTHREADS - 1) / NUMTHREADS // Calcolo del numero ottimale di blocchi __constant__ float primo_vettore_confronto[SUBSEQLENGTH]; // Si crea il vettore della memoria constant e lo si riempe con una parte // del vettore timeSeries lungo SUBSEQLENGTH partendo dall'indice i using namespace std; __inline__ __device__ void warpReduceMin(float& val, int& idx) { for (int offset = warpSize / 2; offset > 0; offset /= 2) { float tmpVal = __shfl_down_sync(0xFFFFFFFF, val, offset, 32); int tmpIdx = __shfl_down_sync(0xFFFFFFFF, idx, offset, 32); if (tmpVal == val) { // Se due valori di distanza sono uguai si salva la posizione più piccola if (tmpIdx < idx) { idx = tmpIdx; } } if (tmpVal < val) { val = tmpVal; idx = tmpIdx; } } } __inline__ __device__ void blockReduceMin(float& val, int& idx, int currentThreads, int indexFirstSubsequence) { static __shared__ float values[32], indices[32]; // Shared mem for 32 partial mins int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; if (lane == 0) { // Il primo thread di ogni warp inizializza un elemento della memoria shared values[wid] = FLT_MAX; // all'indice che corrisponde a quello del blocco di cui fa parte } warpReduceMin(val, idx); // Each warp performs partial reduction if (lane == 0) { values[wid] = val; // Write reduced value to shared memory indices[wid] = idx; // Write reduced value to shared memory } __syncthreads(); // Wait for all partial reductions if (threadIdx.x < (blockDim.x + warpSize - 1) / warpSize) { // Read from shared memory only if that warp existed val = values[lane]; // && threadIdx.x <= currentThreads / warpSize idx = indices[lane]; } else { val = FLT_MAX; idx = 0; } if (wid == 0) { warpReduceMin(val, idx); // Final reduce within first warp } } __global__ void sequencesDistance(int indexFirstSubsequence, float* dev_timeSeries, float* dev_blocksDistances, int* dev_blocksLocations) { extern __shared__ float cache[]; // La sua lunghezza è NUMTHREADS + SUBSEQLENGTH - 1 float sum = 0; float distanza = FLT_MAX; // Variabile a cui è assegnata la distanza. I thread non coinvolti nel calcolo hanno questo valore come default float diff; float power; int first_subseq_idx = indexFirstSubsequence; // Indice della prima sottosequenza che verrà confrontata con tutte le altre int second_subseq_idx = blockIdx.x * blockDim.x + threadIdx.x; // Indice della seconda sottosequenza che si confronta con la prima. 
L'id globale di ogni thread stabilisce il punto di partenza int dev_ts_position = blockIdx.x * blockDim.x + threadIdx.x; int cache_idx = threadIdx.x; while (cache_idx < SHAREDLENGTH && dev_ts_position <= TSLENGTH) { // Ogni thread carica nella shared uno o più elementi di timeSeries cache[cache_idx] = dev_timeSeries[dev_ts_position]; cache_idx = cache_idx + NUMTHREADS; dev_ts_position = dev_ts_position + NUMTHREADS; } __syncthreads(); // SYNC perché non tutti possono aver già riempito la cache if (abs(second_subseq_idx - first_subseq_idx) >= SUBSEQLENGTH && second_subseq_idx < NUMOFSUBSEQ) { // Verifica di self-match e controllo che il secondo indice rientri nei valori consentiti for (int i = 0; i < SUBSEQLENGTH; i++) { diff = primo_vettore_confronto[i] - cache[threadIdx.x + i]; power = diff * diff; sum = sum + power; } distanza = sqrt(sum); // Da rimuovere nella versione finale, la radice si calcola alla fine } blockReduceMin(distanza, second_subseq_idx, NUMTHREADS, first_subseq_idx); if (threadIdx.x == 0) { dev_blocksDistances[blockIdx.x] = distanza; dev_blocksLocations[blockIdx.x] = second_subseq_idx; } } __global__ void finalReduction(int indexFirstSubsequence, int previousBlocks, int* dev_blocksLocations, float* dev_blocksDistances, int* dev_finalLocations, float* dev_finalDistances) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float val = FLT_MAX; int idx = 0; if (tid < previousBlocks) { val = dev_blocksDistances[tid]; idx = dev_blocksLocations[tid]; } blockReduceMin(val, idx, previousBlocks, indexFirstSubsequence); if (threadIdx.x == 0 && gridDim.x != 1) { // Si utilizzano i vettori dev_blocksDistances e dev_blocksLocations per salvare i risultati delle riduzioni dev_blocksDistances[blockIdx.x] = val; // ad ogni nuova iterazione finché si utilizza più di un blocco dev_blocksLocations[blockIdx.x] = idx; } if (tid == 0 && gridDim.x == 1) { // Si scrive sui vettori finali solo quando la riduzione è arrivata ad utilizzare un solo blocco dev_finalDistances[indexFirstSubsequence] = val; dev_finalLocations[indexFirstSubsequence] = idx; } } void compareSubsequences(float* dev_blocksDistances, int* dev_blocksLocations, float* dev_timeSeries, float* dev_finalDistances, int* dev_finalLocations) { int threads = 1024; for (int i = 0; i <= TSLENGTH - SUBSEQLENGTH; i++) { // Outer loop int previousBlocks = NUMBLOCKS; int currentBlocks = (NUMBLOCKS + threads - 1) / threads; bool continueReduction = true; cudaMemcpyToSymbol(primo_vettore_confronto, &dev_timeSeries[i], SUBSEQLENGTH * sizeof(float), 0, cudaMemcpyDeviceToDevice); // Copia nella constant la sottosequenza all'i-esima posizione // da confrontare con tutte le altre sequencesDistance<<<NUMBLOCKS, NUMTHREADS, SHAREDLENGTH * sizeof(float)>>>(i, dev_timeSeries, dev_blocksDistances, dev_blocksLocations); // Kernel che esegue il calcolo delle distanze // ed una prima riduzione while (continueReduction) { // Si continua a ridurre i risultati finché il secondo kernel non ha un solo blocco finalReduction<<<currentBlocks, threads>>>(i, previousBlocks, dev_blocksLocations, dev_blocksDistances, dev_finalLocations, dev_finalDistances); // Riduce i risultati ottenuti dal kernel precedente if (currentBlocks == 1) { continueReduction = false; } previousBlocks = currentBlocks; currentBlocks = (currentBlocks + threads - 1) / threads; // Calcolo del nuovo numero di blocchi da usare in finalReduction } } cudaFree(primo_vettore_confronto); } //--------------------Lettura e scrittura file start-------------------------- void readFile(float* timeSeries, 
string fileName) { const char* c = fileName.c_str(); float num = 0; int i = 0; ifstream readFile; readFile.open(c); if (!readFile.is_open()) { // Check to see that the file was opened correctly cerr << "There was a problem opening the input file!\n"; exit(1); // Exit or do additional error checking } while (readFile >> num) { // Keep storing values from the text file so long as data exists timeSeries[i] = float(num); i++; } readFile.close(); } void scriviFile(float* distances, int* locations, string fileName) { FILE* fp; //char nomeFile; //nomeFile = "nnd.dat"; fp = fopen("nnd.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%lf \n", distances[i]); } fclose(fp); // nomeFile = "ngh"+fileName +".dat"; fp = fopen("loc.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%d \n ", locations[i] + 1); } fclose(fp); fp = fopen("total.dat", "w+"); for (int i = 0; i < NUMOFSUBSEQ; i++) { fprintf(fp, "%lf \t %d \n", distances[i], locations[i] + 1); } fclose(fp); } //--------------------Lettura e scrittura file end----------------------------- int main() { //string fileName = "ecg0606_1.csv"; string fileName = "nprs44.txt"; //string fileName = "318_signal1.txt"; float* timeSeries; float* distances; int* locations; timeSeries = (float*)malloc(TSLENGTH * sizeof(float)); distances = (float*)malloc(NUMOFSUBSEQ * sizeof(float)); locations = (int*)malloc(NUMOFSUBSEQ * sizeof(int)); fill_n(distances, NUMOFSUBSEQ, FLT_MAX); cout << "File name: " << fileName << endl; cout << "File length: " << TSLENGTH << endl; cout << "Subsequence length: " << SUBSEQLENGTH << endl; cout << "Number of blocks: " << NUMBLOCKS << endl; cout << "Number of threads: " << NUMTHREADS << endl; cout << "Number of subsequences: " << NUMOFSUBSEQ << endl; cout << "Shared memory dimension: " << SHAREDLENGTH << endl; readFile(timeSeries, fileName); float* dev_blocksDistances; // Vettore sulla memoria global dove ogni blocco salva il risultato della distanza che calcola int* dev_blocksLocations; // Vettore sulla memoria global dove ogni blocco salva l'indice della propria migliore distanza trovata float* dev_finalDistances; int* dev_finalLocations; float* dev_timeSeries; cudaMalloc((void**)&dev_blocksDistances, NUMBLOCKS * sizeof(float)); cudaMalloc((void**)&dev_blocksLocations, NUMBLOCKS * sizeof(int)); cudaMalloc((void**)&dev_timeSeries, TSLENGTH * sizeof(float)); cudaMalloc((void**)&dev_finalDistances, NUMOFSUBSEQ * sizeof(float)); cudaMalloc((void**)&dev_finalLocations, NUMOFSUBSEQ * sizeof(int)); cudaMemcpy(dev_timeSeries, timeSeries, TSLENGTH * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_finalDistances, distances, NUMOFSUBSEQ * sizeof(float), cudaMemcpyHostToDevice); compareSubsequences(dev_blocksDistances, dev_blocksLocations, dev_timeSeries, dev_finalDistances, dev_finalLocations); // Funzione che lancia il kernel cudaMemcpy(distances, dev_finalDistances, NUMOFSUBSEQ * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(locations, dev_finalLocations, NUMOFSUBSEQ * sizeof(int), cudaMemcpyDeviceToHost); scriviFile(distances, locations, fileName); free(timeSeries); free(distances); free(locations); cudaFree(dev_blocksDistances); cudaFree(dev_blocksLocations); cudaFree(dev_timeSeries); cudaFree(dev_finalDistances); cudaFree(dev_finalLocations); return 0; }
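// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the pair above: the same shuffle-based argmin
// reduction used by warpReduceMin/blockReduceMin, reduced to its core. One difference
// worth noting: the per-warp winners' indices are kept in an int array here, whereas the
// kernels above store them in a float array, which only round-trips exactly for indices
// that fit in a float mantissa (fine for the TSLENGTH used, but a silent limit).
#include <cfloat>   // FLT_MAX

__inline__ __device__ void warpArgMin(float& val, int& idx) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        float otherVal = __shfl_down_sync(0xFFFFFFFF, val, offset);
        int   otherIdx = __shfl_down_sync(0xFFFFFFFF, idx, offset);
        // keep the smaller value; on ties keep the smaller index, as above
        if (otherVal < val || (otherVal == val && otherIdx < idx)) {
            val = otherVal;
            idx = otherIdx;
        }
    }
}

__inline__ __device__ void blockArgMin(float& val, int& idx) {
    __shared__ float vals[32];
    __shared__ int   idxs[32];
    const int lane = threadIdx.x % warpSize;
    const int wid  = threadIdx.x / warpSize;
    warpArgMin(val, idx);                                 // per-warp winner
    if (lane == 0) { vals[wid] = val; idxs[wid] = idx; }  // lane 0 publishes it
    __syncthreads();
    // the first warp reduces the per-warp winners; lanes past the warp count stay neutral
    if (threadIdx.x < (blockDim.x + warpSize - 1) / warpSize) {
        val = vals[lane];
        idx = idxs[lane];
    } else {
        val = FLT_MAX;
        idx = 0;
    }
    if (wid == 0) warpArgMin(val, idx);
}
// ----------------------------------------------------------------------------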
d630dd56475917f151f007f0eb7769176fb989a9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void NormalizeSubtract3DKernel(float * img_src, const float * img_sub, const int width, const int height, const int depth, float normalizer) {
    const int baseX = blockIdx.x * SUB_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * SUB_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * SUB_BLOCKDIM_Z + threadIdx.z;
    const int idx = (baseZ * height + baseY) * width + baseX;
    img_src[idx] = (img_src[idx] - img_sub[idx]) * normalizer;
}
d630dd56475917f151f007f0eb7769176fb989a9.cu
#include "includes.h"

__global__ void NormalizeSubtract3DKernel(float * img_src, const float * img_sub, const int width, const int height, const int depth, float normalizer) {
    const int baseX = blockIdx.x * SUB_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * SUB_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * SUB_BLOCKDIM_Z + threadIdx.z;
    const int idx = (baseZ * height + baseY) * width + baseX;
    img_src[idx] = (img_src[idx] - img_sub[idx]) * normalizer;
}
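// ----------------------------------------------------------------------------
// Sketch of a plausible host-side launch for the kernel above; it is not part of the
// original pair. SUB_BLOCKDIM_X/Y/Z come from "includes.h", which is not shown, so the
// 8x8x8 block shape below is an assumption and must match those macros for the indexing
// to be correct. Note that the kernel performs no bounds check (and never reads its depth
// argument), so the grid must tile the volume exactly.
void launchNormalizeSubtract3D(float* d_src, const float* d_sub,
                               int width, int height, int depth, float normalizer) {
    dim3 block(8, 8, 8);                          // assumed values of SUB_BLOCKDIM_X/Y/Z
    dim3 grid(width / 8, height / 8, depth / 8);  // assumes each dimension divides evenly
    NormalizeSubtract3DKernel<<<grid, block>>>(d_src, d_sub, width, height, depth, normalizer);
}
// ----------------------------------------------------------------------------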
6ca6900a64630929c9b1cdf25e5fac05b3746ce5.hip
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/inner_product.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <ctime>
#include <iostream>

void transpose(int N, int M, double* src, double* dst) {
  for (int n = 0; n < N * M; n++) {
    int i = n / N;
    int j = n % N;
    dst[n] = src[M * j + i];
  }
}

void printMatrix(int N, int M, double* array) {
  for (int i = 0; i < N * M; i++) {
    std::cout << "\t" << array[i];
    if (!((i + 1) % M))
      std::cout << std::endl;
  }
  std::cout << std::endl;
}

void printMatrix(int N, int M, thrust::device_vector<double> array) {
  for (int i = 0; i < N * M; i++) {
    std::cout << "\t" << array[i];
    if (!((i + 1) % M))
      std::cout << std::endl;
  }
  std::cout << std::endl;
}

template<class T>
struct dp {
  T* A, * B;
  int m, n, r;
  dp(T* _A, T* _B, int _m, int _n, int _r) : A(_A), B(_B), m(_m), n(_n), r(_r) {};
  __host__ __device__ T operator()(size_t idx) {
    T sum = 0.0f;
    int row = idx / r;
    int col = idx - (row * r); // cheaper modulo
    for (int i = 0; i < m; i++)
      sum += A[row * m + i] * B[col * m + i];
    return sum;
  }
};

void Transpose(double* src, double* dst, unsigned n, unsigned m) {
  // Allocate device memory
  double* d_src;
  double* d_dst;

  // Allocate device memory
  if (hipMalloc(&d_src, sizeof(double) * n * m) != hipSuccess)
    std::cout << "hipMalloc failed!" << std::endl;
  if (hipMalloc(&d_dst, sizeof(double) * m * n) != hipSuccess)
    std::cout << "hipMalloc failed!" << std::endl;
  if (hipMemcpy(d_src, src, m * n * sizeof(double), hipMemcpyHostToDevice))
    std::cout << "hipMemcpy failed!" << std::endl;
  //hipDeviceSynchronize();

  // cuBLAS handle
  hipblasHandle_t handle;
  if (hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS)
    std::cout << "CUBLAS initialization failed" << std::endl;

  // Scalaing factors
  double alpha = 1.0;
  double beta = 0.0;

  // Tranpose d_matrix2
  hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, &alpha, d_src, m, &beta, d_src, n, d_dst, n);
  //hipDeviceSynchronize();

  // Copy back the three matrices
  hipMemcpy(dst, d_dst, sizeof(double) * m * n, hipMemcpyDeviceToHost);
  //hipDeviceSynchronize();

  // Free our memory
  hipFree(d_src);
  hipFree(d_dst);
  hipblasDestroy(handle);
}

void MultiplyThrust(double* m1, double* m2, double* result, unsigned m, unsigned n, unsigned r) {
  thrust::device_vector<double> matrix1(m1, m1 + n * m);
  thrust::device_vector<double> matrix2(m2, m2 + m * r);
  thrust::device_vector<double> matrix_result(n * r, 0);
  thrust::transform(thrust::counting_iterator<unsigned>(0),
                    thrust::counting_iterator<unsigned>(n * r),
                    matrix_result.begin(),
                    dp<double>(thrust::raw_pointer_cast(matrix1.data()),
                               thrust::raw_pointer_cast(matrix2.data()), m, n, r));
  hipDeviceSynchronize();
  thrust::copy(matrix_result.begin(), matrix_result.end(), result);
}

void MultiplyCUBLAS(double* m1, double* m2, double* result, unsigned m, unsigned n, unsigned r) {
  // Allocate device memory
  double* d_matrix1;
  double* d_matrix2;
  double* d_result;

  // Allocate device memory
  if (hipMalloc(&d_matrix1, sizeof(double) * n * m) != hipSuccess)
    std::cout << "hipMalloc failed!" << std::endl;
  if (hipMalloc(&d_matrix2, sizeof(double) * m * r) != hipSuccess)
    std::cout << "hipMalloc failed!" << std::endl;
  if (hipMalloc(&d_result, sizeof(double) * n * r) != hipSuccess)
    std::cout << "hipMalloc failed!" << std::endl;

  // Copy host to device memory
  if (hipMemcpy(d_matrix1, m1, n * m * sizeof(double), hipMemcpyHostToDevice))
    std::cout << "hipMemcpy failed!" << std::endl;
  if (hipMemcpy(d_matrix2, m2, m * r * sizeof(double), hipMemcpyHostToDevice))
    std::cout << "hipMemcpy failed!" << std::endl;
  //hipDeviceSynchronize();

  // cuBLAS handle
  hipblasHandle_t handle;
  if (hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS)
    std::cout << "CUBLAS initialization failed" << std::endl;

  // Scalaing factors
  double alpha = 1.0;
  double beta = 0.0;

  // Calculate: c = (alpha*a) * b + (beta*c)
  // nxr = nxm * mxr
  // Signature: handle, operation, operation, n, r, m, alpha, A, lda, B, ldb,
  // beta, C, ldc
  hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, r, m, &alpha, d_matrix2, n, d_matrix1, m, &beta, d_result, r);
  //hipDeviceSynchronize();

  // Copy back the three matrices
  hipMemcpy(result, d_result, sizeof(double) * n * r, hipMemcpyDeviceToHost);
  //hipDeviceSynchronize();

  // Free our memory
  hipFree(d_matrix1);
  hipFree(d_matrix2);
  hipFree(d_result);
  hipblasDestroy(handle);
}

int main() {
  clock_t begin_time = clock();
  const int n = 2;
  const int m = 3;
  const int r = 2;
  double matrix1[n * m];
  double matrix2[m * r];
  double matrix2_T[m * r];
  double result[n * r];
  for (int i = 0; i < n * m; ++i) matrix1[i] = i;
  for (int i = 0; i < m * r; ++i) matrix2[i] = i;
  for (int i = 0; i < n * r; ++i) result[i] = 0;

  Transpose(matrix2, matrix2_T, m, r);

  std::cout << std::endl << "matrix 1" << std::endl;
  printMatrix(n, m, matrix1);
  std::cout << std::endl << "matrix 2" << std::endl;
  printMatrix(m, r, matrix2);
  std::cout << std::endl << "matrix 2 transpose" << std::endl;
  printMatrix(n, m, matrix2_T);

  //////////////////////////////
  /// CPU Nested loop //
  //////////////////////////////
  begin_time = clock();
  std::cout << begin_time << " , CPU Nested loop" << std::endl;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < r; ++j) {
      double acc = 0.0;
      for (int k = 0; k < m; ++k)
        acc += matrix1[k + i * m] * matrix2[k * r + j];
      result[i * n + j] = acc;
    }
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , CPU Nested loop , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////////
  /// CPU Nested loop transpose //
  ////////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , CPU Nested loop transpose" << std::endl;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < r; ++j) {
      double acc = 0.0;
      for (int k = 0; k < m; ++k)
        acc += matrix1[k + i * m] * matrix2_T[k + j * m];
      result[i * n + j] = acc;
    }
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , CPU Nested loop transpose , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////////
  /// GPU thrust::inner_product //
  ////////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , GPU thrust::inner_product" << std::endl;
  thrust::device_vector<double> inner_matrix1(matrix1, matrix1 + n * m);
  thrust::device_vector<double> inner_matrix2(matrix2_T, matrix2_T + m * r);
  for (int j = 0; j < n; ++j) {
    for (int i = 0; i < r; ++i)
      result[j * n + i] = thrust::inner_product(inner_matrix1.begin() + j * m,
                                                inner_matrix1.begin() + j * m + m,
                                                inner_matrix2.begin() + i * m, 0.0f);
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , GPU thrust::inner_product , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////
  /// GPU thrust::transform //
  ////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , GPU thrust::transform" << std::endl;
  thrust::device_vector<double> transform_matrix1(matrix1, matrix1 + n * m);
  thrust::device_vector<double> transform_matrix2(matrix2_T, matrix2_T + m * r);
  thrust::device_vector<double> transform_result(n * r, 0);
  thrust::transform(thrust::counting_iterator<int>(0),
                    thrust::counting_iterator<int>(n * r),
                    transform_result.begin(),
                    dp<double>(thrust::raw_pointer_cast(transform_matrix1.data()),
                               thrust::raw_pointer_cast(transform_matrix2.data()), m, n, r));
  hipDeviceSynchronize();
  thrust::copy(transform_result.begin(), transform_result.end(), result);
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , GPU thrust::transform , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  //////////////////////////////
  /// GPU CUDA CUBLAS //
  //////////////////////////////
  MultiplyCUBLAS(matrix1, matrix2, result, m, n, r);
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  return 0;
}
6ca6900a64630929c9b1cdf25e5fac05b3746ce5.cu
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/inner_product.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <ctime>
#include <iostream>

void transpose(int N, int M, double* src, double* dst) {
  for (int n = 0; n < N * M; n++) {
    int i = n / N;
    int j = n % N;
    dst[n] = src[M * j + i];
  }
}

void printMatrix(int N, int M, double* array) {
  for (int i = 0; i < N * M; i++) {
    std::cout << "\t" << array[i];
    if (!((i + 1) % M))
      std::cout << std::endl;
  }
  std::cout << std::endl;
}

void printMatrix(int N, int M, thrust::device_vector<double> array) {
  for (int i = 0; i < N * M; i++) {
    std::cout << "\t" << array[i];
    if (!((i + 1) % M))
      std::cout << std::endl;
  }
  std::cout << std::endl;
}

template<class T>
struct dp {
  T* A, * B;
  int m, n, r;
  dp(T* _A, T* _B, int _m, int _n, int _r) : A(_A), B(_B), m(_m), n(_n), r(_r) {};
  __host__ __device__ T operator()(size_t idx) {
    T sum = 0.0f;
    int row = idx / r;
    int col = idx - (row * r); // cheaper modulo
    for (int i = 0; i < m; i++)
      sum += A[row * m + i] * B[col * m + i];
    return sum;
  }
};

void Transpose(double* src, double* dst, unsigned n, unsigned m) {
  // Allocate device memory
  double* d_src;
  double* d_dst;

  // Allocate device memory
  if (cudaMalloc(&d_src, sizeof(double) * n * m) != cudaSuccess)
    std::cout << "cudaMalloc failed!" << std::endl;
  if (cudaMalloc(&d_dst, sizeof(double) * m * n) != cudaSuccess)
    std::cout << "cudaMalloc failed!" << std::endl;
  if (cudaMemcpy(d_src, src, m * n * sizeof(double), cudaMemcpyHostToDevice))
    std::cout << "cudaMemcpy failed!" << std::endl;
  //cudaDeviceSynchronize();

  // cuBLAS handle
  cublasHandle_t handle;
  if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
    std::cout << "CUBLAS initialization failed" << std::endl;

  // Scalaing factors
  double alpha = 1.0;
  double beta = 0.0;

  // Tranpose d_matrix2
  cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, &alpha, d_src, m, &beta, d_src, n, d_dst, n);
  //cudaDeviceSynchronize();

  // Copy back the three matrices
  cudaMemcpy(dst, d_dst, sizeof(double) * m * n, cudaMemcpyDeviceToHost);
  //cudaDeviceSynchronize();

  // Free our memory
  cudaFree(d_src);
  cudaFree(d_dst);
  cublasDestroy(handle);
}

void MultiplyThrust(double* m1, double* m2, double* result, unsigned m, unsigned n, unsigned r) {
  thrust::device_vector<double> matrix1(m1, m1 + n * m);
  thrust::device_vector<double> matrix2(m2, m2 + m * r);
  thrust::device_vector<double> matrix_result(n * r, 0);
  thrust::transform(thrust::counting_iterator<unsigned>(0),
                    thrust::counting_iterator<unsigned>(n * r),
                    matrix_result.begin(),
                    dp<double>(thrust::raw_pointer_cast(matrix1.data()),
                               thrust::raw_pointer_cast(matrix2.data()), m, n, r));
  cudaDeviceSynchronize();
  thrust::copy(matrix_result.begin(), matrix_result.end(), result);
}

void MultiplyCUBLAS(double* m1, double* m2, double* result, unsigned m, unsigned n, unsigned r) {
  // Allocate device memory
  double* d_matrix1;
  double* d_matrix2;
  double* d_result;

  // Allocate device memory
  if (cudaMalloc(&d_matrix1, sizeof(double) * n * m) != cudaSuccess)
    std::cout << "cudaMalloc failed!" << std::endl;
  if (cudaMalloc(&d_matrix2, sizeof(double) * m * r) != cudaSuccess)
    std::cout << "cudaMalloc failed!" << std::endl;
  if (cudaMalloc(&d_result, sizeof(double) * n * r) != cudaSuccess)
    std::cout << "cudaMalloc failed!" << std::endl;

  // Copy host to device memory
  if (cudaMemcpy(d_matrix1, m1, n * m * sizeof(double), cudaMemcpyHostToDevice))
    std::cout << "cudaMemcpy failed!" << std::endl;
  if (cudaMemcpy(d_matrix2, m2, m * r * sizeof(double), cudaMemcpyHostToDevice))
    std::cout << "cudaMemcpy failed!" << std::endl;
  //cudaDeviceSynchronize();

  // cuBLAS handle
  cublasHandle_t handle;
  if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS)
    std::cout << "CUBLAS initialization failed" << std::endl;

  // Scalaing factors
  double alpha = 1.0;
  double beta = 0.0;

  // Calculate: c = (alpha*a) * b + (beta*c)
  // nxr = nxm * mxr
  // Signature: handle, operation, operation, n, r, m, alpha, A, lda, B, ldb,
  // beta, C, ldc
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, r, m, &alpha, d_matrix2, n, d_matrix1, m, &beta, d_result, r);
  //cudaDeviceSynchronize();

  // Copy back the three matrices
  cudaMemcpy(result, d_result, sizeof(double) * n * r, cudaMemcpyDeviceToHost);
  //cudaDeviceSynchronize();

  // Free our memory
  cudaFree(d_matrix1);
  cudaFree(d_matrix2);
  cudaFree(d_result);
  cublasDestroy(handle);
}

int main() {
  clock_t begin_time = clock();
  const int n = 2;
  const int m = 3;
  const int r = 2;
  double matrix1[n * m];
  double matrix2[m * r];
  double matrix2_T[m * r];
  double result[n * r];
  for (int i = 0; i < n * m; ++i) matrix1[i] = i;
  for (int i = 0; i < m * r; ++i) matrix2[i] = i;
  for (int i = 0; i < n * r; ++i) result[i] = 0;

  Transpose(matrix2, matrix2_T, m, r);

  std::cout << std::endl << "matrix 1" << std::endl;
  printMatrix(n, m, matrix1);
  std::cout << std::endl << "matrix 2" << std::endl;
  printMatrix(m, r, matrix2);
  std::cout << std::endl << "matrix 2 transpose" << std::endl;
  printMatrix(n, m, matrix2_T);

  //////////////////////////////
  /// CPU Nested loop //
  //////////////////////////////
  begin_time = clock();
  std::cout << begin_time << " , CPU Nested loop" << std::endl;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < r; ++j) {
      double acc = 0.0;
      for (int k = 0; k < m; ++k)
        acc += matrix1[k + i * m] * matrix2[k * r + j];
      result[i * n + j] = acc;
    }
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , CPU Nested loop , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////////
  /// CPU Nested loop transpose //
  ////////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , CPU Nested loop transpose" << std::endl;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < r; ++j) {
      double acc = 0.0;
      for (int k = 0; k < m; ++k)
        acc += matrix1[k + i * m] * matrix2_T[k + j * m];
      result[i * n + j] = acc;
    }
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , CPU Nested loop transpose , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////////
  /// GPU thrust::inner_product //
  ////////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , GPU thrust::inner_product" << std::endl;
  thrust::device_vector<double> inner_matrix1(matrix1, matrix1 + n * m);
  thrust::device_vector<double> inner_matrix2(matrix2_T, matrix2_T + m * r);
  for (int j = 0; j < n; ++j) {
    for (int i = 0; i < r; ++i)
      result[j * n + i] = thrust::inner_product(inner_matrix1.begin() + j * m,
                                                inner_matrix1.begin() + j * m + m,
                                                inner_matrix2.begin() + i * m, 0.0f);
  }
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , GPU thrust::inner_product , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  ////////////////////////////////////
  /// GPU thrust::transform //
  ////////////////////////////////////
  begin_time = clock();
  std::cout << std::endl << begin_time << " , GPU thrust::transform" << std::endl;
  thrust::device_vector<double> transform_matrix1(matrix1, matrix1 + n * m);
  thrust::device_vector<double> transform_matrix2(matrix2_T, matrix2_T + m * r);
  thrust::device_vector<double> transform_result(n * r, 0);
  thrust::transform(thrust::counting_iterator<int>(0),
                    thrust::counting_iterator<int>(n * r),
                    transform_result.begin(),
                    dp<double>(thrust::raw_pointer_cast(transform_matrix1.data()),
                               thrust::raw_pointer_cast(transform_matrix2.data()), m, n, r));
  cudaDeviceSynchronize();
  thrust::copy(transform_result.begin(), transform_result.end(), result);
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  std::cout << clock() << " , GPU thrust::transform , "
            << double(clock() - begin_time) / CLOCKS_PER_SEC << std::endl;

  //////////////////////////////
  /// GPU CUDA CUBLAS //
  //////////////////////////////
  MultiplyCUBLAS(matrix1, matrix2, result, m, n, r);
  std::cout << std::endl << "\tresults" << std::endl;
  printMatrix(n, r, result);
  return 0;
}
fd6048f67772cdbd6d3399c14a4494a47a5d2ae5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cmath> #include <cstdio> #include <cstdlib> // include kernels and cuda headers after definitions of structures #include "common_hip.cuh" #include "kernels_hip.cuh" void ReadRecords(student_record *records); void MaximumMarkAtomic(student_records *, student_records *, student_records *, student_records *); void MaximumMarkRecursive(student_records *, student_records *, student_records *, student_records *); void MaximumMarkSM(student_records *, student_records *, student_records *, student_records *); void MaximumMarkShuffle(student_records *, student_records *, student_records *, student_records *); int main(void) { student_record *records_aos; student_records *h_records; student_records *h_records_result; student_records *d_records; student_records *d_records_result; // host allocation records_aos = (student_record *)malloc(sizeof(student_record) * NUM_RECORDS); h_records = (student_records *)malloc(sizeof(student_records)); h_records_result = (student_records *)malloc(sizeof(student_records)); // device allocation hipMalloc((void **)&d_records, sizeof(student_records)); hipMalloc((void **)&d_records_result, sizeof(student_records)); CheckCUDAError("CUDA malloc"); // read file ReadRecords(records_aos); // Exercise 1.1) Convert records_aos to a structure of arrays in h_records for (int i = 0; i < NUM_RECORDS; i++) { h_records->student_ids[i] = records_aos[i].student_id; h_records->assignment_marks[i] = records_aos[i].assignment_mark; } // free AOS as it is no longer needed free(records_aos); // apply each approach in turn MaximumMarkAtomic(h_records, h_records_result, d_records, d_records_result); MaximumMarkRecursive(h_records, h_records_result, d_records, d_records_result); MaximumMarkSM(h_records, h_records_result, d_records, d_records_result); MaximumMarkShuffle(h_records, h_records_result, d_records, d_records_result); // Cleanup free(h_records); free(h_records_result); hipFree(d_records); hipFree(d_records_result); CheckCUDAError("CUDA cleanup"); return 0; } void ReadRecords(student_record *records) { FILE *f = nullptr; f = fopen("com4521.dat", "rb"); // read and binary flags if (f == nullptr) { fprintf(stderr, "Error: Could not find com4521.dat file \n"); exit(1); } // read student data if (fread(records, sizeof(student_record), NUM_RECORDS, f) != NUM_RECORDS) { fprintf(stderr, "Error: Unexpected end of file!\n"); exit(1); } fclose(f); } void MaximumMarkAtomic(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { float max_mark; int max_mark_student_id; float time; hipEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); // memory copy records to device hipMemcpy(d_records, h_records, sizeof(student_records), hipMemcpyHostToDevice); CheckCUDAError("1) CUDA memcpy"); hipEventRecord(start, 0); // find highest mark using GPU dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( MaximumMarkAtomicKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_records); hipDeviceSynchronize(); CheckCUDAError("Atomics: CUDA kernel"); hipEventRecord(stop, 0); hipEventSynchronize(stop); // Copy result back to host hipMemcpyFromSymbol(&max_mark, d_max_mark, sizeof(float)); hipMemcpyFromSymbol(&max_mark_student_id, d_max_mark_student_id, sizeof(int)); 
CheckCUDAError("Atomics: CUDA memcpy back"); hipEventElapsedTime(&time, start, stop); // output result printf("Atomics: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); hipEventDestroy(start); hipEventDestroy(stop); } // Exercise 2) void MaximumMarkRecursive(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { int i; float max_mark; int max_mark_student_id; student_records *d_records_temp1, *d_records_temp2; float time; hipEvent_t start, stop; max_mark = 0; max_mark_student_id = 0; hipMalloc(&d_records_temp1, sizeof(student_records)); hipMalloc(&d_records_temp2, sizeof(student_records)); CheckCUDAError("malloc d_records_temp"); hipEventCreate(&start); hipEventCreate(&stop); // memory copy records to device hipMemcpy(d_records, h_records, sizeof(student_records), hipMemcpyHostToDevice); CheckCUDAError("Recursive: CUDA memcpy"); hipEventRecord(start, 0); // Exercise 2.3) Recursively call GPU steps until there are THREADS_PER_BLOCK // values left dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( MaximumMarkRecursiveKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_records, d_records_temp2); for (int i = NUM_RECORDS / 2; i > THREADS_PER_BLOCK; i /= 2) { std::swap(d_records_temp1, d_records_temp2); blocks_per_grid = dim3(i / THREADS_PER_BLOCK, 1, 1); threads_per_block = dim3(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( MaximumMarkRecursiveKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_records_temp1, d_records_temp2); } hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); // Exercise 2.4) copy back the final THREADS_PER_BLOCK values hipMemcpy(h_records_result, d_records_temp2, sizeof(student_records), hipMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); hipFree(d_records_temp1); hipFree(d_records_temp2); // Exercise 2.5) reduce the final THREADS_PER_BLOCK values on CPU for (int i = 0; i < THREADS_PER_BLOCK; i++) { if (h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } // output the result printf("Recursive: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); hipEventDestroy(start); hipEventDestroy(stop); } // Exercise 3) void MaximumMarkSM(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { unsigned int i; float max_mark; int max_mark_student_id; float time; hipEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); // memory copy records to device hipMemcpy(d_records, h_records, sizeof(student_records), hipMemcpyHostToDevice); CheckCUDAError("SM: CUDA memcpy"); hipEventRecord(start, 0); // Exercise 3.4) Call the shared memory reduction kernel dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( MaximumMarkSMKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_records, d_records_result); hipEventRecord(stop, 0); hipEventSynchronize(stop); // Exercise 3.5) Copy the final block values back to CPU hipMemcpy(h_records_result, d_records_result, 
sizeof(student_records), hipMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); // Exercise 3.6) Reduce the block level results on CPU for (int i = 0; i < blocks_per_grid.x; i++) { if (h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } hipEventElapsedTime(&time, start, stop); // output result printf("SM: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); hipEventDestroy(start); hipEventDestroy(stop); } // Exercise 4) void MaximumMarkShuffle(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { unsigned int i; unsigned int warps_per_grid; float max_mark; int max_mark_student_id; float time; hipEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); // memory copy records to device hipMemcpy(d_records, h_records, sizeof(student_records), hipMemcpyHostToDevice); CheckCUDAError("Shuffle: CUDA memcpy"); hipEventRecord(start, 0); dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); hipLaunchKernelGGL(( MaximumMarkShuffleKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_records, d_records_result); // Exercise 4.2) Execute the kernel, copy back result, reduce final values on // CPU hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipMemcpy(h_records_result, d_records_result, sizeof(student_records), hipMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); for (int i = 0; i < NUM_RECORDS >> 5; i++) { if (h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } // output result printf("Shuffle: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); hipEventDestroy(start); hipEventDestroy(stop); }
fd6048f67772cdbd6d3399c14a4494a47a5d2ae5.cu
#include <algorithm> #include <cmath> #include <cstdio> #include <cstdlib> // include kernels and cuda headers after definitions of structures #include "common.cuh" #include "kernels.cuh" void ReadRecords(student_record *records); void MaximumMarkAtomic(student_records *, student_records *, student_records *, student_records *); void MaximumMarkRecursive(student_records *, student_records *, student_records *, student_records *); void MaximumMarkSM(student_records *, student_records *, student_records *, student_records *); void MaximumMarkShuffle(student_records *, student_records *, student_records *, student_records *); int main(void) { student_record *records_aos; student_records *h_records; student_records *h_records_result; student_records *d_records; student_records *d_records_result; // host allocation records_aos = (student_record *)malloc(sizeof(student_record) * NUM_RECORDS); h_records = (student_records *)malloc(sizeof(student_records)); h_records_result = (student_records *)malloc(sizeof(student_records)); // device allocation cudaMalloc((void **)&d_records, sizeof(student_records)); cudaMalloc((void **)&d_records_result, sizeof(student_records)); CheckCUDAError("CUDA malloc"); // read file ReadRecords(records_aos); // Exercise 1.1) Convert records_aos to a structure of arrays in h_records for (int i = 0; i < NUM_RECORDS; i++) { h_records->student_ids[i] = records_aos[i].student_id; h_records->assignment_marks[i] = records_aos[i].assignment_mark; } // free AOS as it is no longer needed free(records_aos); // apply each approach in turn MaximumMarkAtomic(h_records, h_records_result, d_records, d_records_result); MaximumMarkRecursive(h_records, h_records_result, d_records, d_records_result); MaximumMarkSM(h_records, h_records_result, d_records, d_records_result); MaximumMarkShuffle(h_records, h_records_result, d_records, d_records_result); // Cleanup free(h_records); free(h_records_result); cudaFree(d_records); cudaFree(d_records_result); CheckCUDAError("CUDA cleanup"); return 0; } void ReadRecords(student_record *records) { FILE *f = nullptr; f = fopen("com4521.dat", "rb"); // read and binary flags if (f == nullptr) { fprintf(stderr, "Error: Could not find com4521.dat file \n"); exit(1); } // read student data if (fread(records, sizeof(student_record), NUM_RECORDS, f) != NUM_RECORDS) { fprintf(stderr, "Error: Unexpected end of file!\n"); exit(1); } fclose(f); } void MaximumMarkAtomic(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { float max_mark; int max_mark_student_id; float time; cudaEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); // memory copy records to device cudaMemcpy(d_records, h_records, sizeof(student_records), cudaMemcpyHostToDevice); CheckCUDAError("1) CUDA memcpy"); cudaEventRecord(start, 0); // find highest mark using GPU dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); MaximumMarkAtomicKernel<<<blocks_per_grid, threads_per_block>>>(d_records); cudaDeviceSynchronize(); CheckCUDAError("Atomics: CUDA kernel"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Copy result back to host cudaMemcpyFromSymbol(&max_mark, d_max_mark, sizeof(float)); cudaMemcpyFromSymbol(&max_mark_student_id, d_max_mark_student_id, sizeof(int)); CheckCUDAError("Atomics: CUDA memcpy back"); cudaEventElapsedTime(&time, start, stop); // output result printf("Atomics: Highest mark 
recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); } // Exercise 2) void MaximumMarkRecursive(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { int i; float max_mark; int max_mark_student_id; student_records *d_records_temp1, *d_records_temp2; float time; cudaEvent_t start, stop; max_mark = 0; max_mark_student_id = 0; cudaMalloc(&d_records_temp1, sizeof(student_records)); cudaMalloc(&d_records_temp2, sizeof(student_records)); CheckCUDAError("malloc d_records_temp"); cudaEventCreate(&start); cudaEventCreate(&stop); // memory copy records to device cudaMemcpy(d_records, h_records, sizeof(student_records), cudaMemcpyHostToDevice); CheckCUDAError("Recursive: CUDA memcpy"); cudaEventRecord(start, 0); // Exercise 2.3) Recursively call GPU steps until there are THREADS_PER_BLOCK // values left dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); MaximumMarkRecursiveKernel<<<blocks_per_grid, threads_per_block>>>( d_records, d_records_temp2); for (int i = NUM_RECORDS / 2; i > THREADS_PER_BLOCK; i /= 2) { std::swap(d_records_temp1, d_records_temp2); blocks_per_grid = dim3(i / THREADS_PER_BLOCK, 1, 1); threads_per_block = dim3(THREADS_PER_BLOCK, 1, 1); MaximumMarkRecursiveKernel<<<blocks_per_grid, threads_per_block>>>( d_records_temp1, d_records_temp2); } cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); // Exercise 2.4) copy back the final THREADS_PER_BLOCK values cudaMemcpy(h_records_result, d_records_temp2, sizeof(student_records), cudaMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); cudaFree(d_records_temp1); cudaFree(d_records_temp2); // Exercise 2.5) reduce the final THREADS_PER_BLOCK values on CPU for (int i = 0; i < THREADS_PER_BLOCK; i++) { if (h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } // output the result printf("Recursive: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); } // Exercise 3) void MaximumMarkSM(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { unsigned int i; float max_mark; int max_mark_student_id; float time; cudaEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); // memory copy records to device cudaMemcpy(d_records, h_records, sizeof(student_records), cudaMemcpyHostToDevice); CheckCUDAError("SM: CUDA memcpy"); cudaEventRecord(start, 0); // Exercise 3.4) Call the shared memory reduction kernel dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); MaximumMarkSMKernel<<<blocks_per_grid, threads_per_block>>>(d_records, d_records_result); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Exercise 3.5) Copy the final block values back to CPU cudaMemcpy(h_records_result, d_records_result, sizeof(student_records), cudaMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); // Exercise 3.6) Reduce the block level results on CPU for (int i = 0; i < blocks_per_grid.x; i++) { if 
(h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } cudaEventElapsedTime(&time, start, stop); // output result printf("SM: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); } // Exercise 4) void MaximumMarkShuffle(student_records *h_records, student_records *h_records_result, student_records *d_records, student_records *d_records_result) { unsigned int i; unsigned int warps_per_grid; float max_mark; int max_mark_student_id; float time; cudaEvent_t start, stop; max_mark = 0; max_mark_student_id = 0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); // memory copy records to device cudaMemcpy(d_records, h_records, sizeof(student_records), cudaMemcpyHostToDevice); CheckCUDAError("Shuffle: CUDA memcpy"); cudaEventRecord(start, 0); dim3 blocks_per_grid(NUM_RECORDS / THREADS_PER_BLOCK, 1, 1); dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1); MaximumMarkShuffleKernel<<<blocks_per_grid, threads_per_block>>>( d_records, d_records_result); // Exercise 4.2) Execute the kernel, copy back result, reduce final values on // CPU cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaMemcpy(h_records_result, d_records_result, sizeof(student_records), cudaMemcpyDeviceToHost); CheckCUDAError("memcpy to h_records_result"); for (int i = 0; i < NUM_RECORDS >> 5; i++) { if (h_records_result->assignment_marks[i] > max_mark) { max_mark = h_records_result->assignment_marks[i]; max_mark_student_id = h_records_result->student_ids[i]; } } // output result printf("Shuffle: Highest mark recorded %f was by student %d\n", max_mark, max_mark_student_id); printf("\tExecution time was %f ms\n", time); cudaEventDestroy(start); cudaEventDestroy(stop); }
9b8408d6db34f488dec8ff31260d7b0b43bd62ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmergetfqmr.cu, normal z -> c, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from tfqmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_ctfqmr_1_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex *v, magmaFloatComplex *Au, magmaFloatComplex *u_m, magmaFloatComplex *pu_m, magmaFloatComplex *u_mp1, magmaFloatComplex *w, magmaFloatComplex *d, magmaFloatComplex *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = u_mp1 - alpha*v; w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] sigma magmaFloatComplex scalar @param[in] v magmaFloatComplex_ptr vector @param[in] Au magmaFloatComplex_ptr vector @param[in,out] u_m magmaFloatComplex_ptr vector @param[in,out] pu_m magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in,out] w magmaFloatComplex_ptr vector @param[in,out] d magmaFloatComplex_ptr vector @param[in,out] Ad magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magmaFloatComplex_ptr u_m, magmaFloatComplex_ptr pu_m, magmaFloatComplex_ptr u_mp1, magmaFloatComplex_ptr w, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_ctfqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma, v, Au, u_m, pu_m, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_2_kernel( int num_rows, int num_cols, magmaFloatComplex eta, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + eta * d r = r - eta * Ad Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta magmaFloatComplex scalar @param[in] d magmaFloatComplex_ptr vector @param[in] Ad magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex eta, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_ctfqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, d, Ad, x, r ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_3_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex *w, magmaFloatComplex *u_m, magmaFloatComplex *u_mp1 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = w[ i+j*num_rows ] + beta * u_m[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = w + beta*u_mp1 Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] w magmaFloatComplex_ptr vector @param[in] u_m magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_3( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex_ptr w, magmaFloatComplex_ptr u_m, magmaFloatComplex_ptr u_mp1, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_ctfqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, w, u_m, u_mp1 ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_4_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex *Au_new, magmaFloatComplex *v, magmaFloatComplex *Au ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ magmaFloatComplex tmp = Au_new[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp + beta * Au[ i+j*num_rows ] + beta * beta * v[ i+j*num_rows ]; Au[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = Au_new + beta*(Au+beta*v); Au = Au_new Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] Au_new magmaFloatComplex_ptr vector @param[in,out] v magmaFloatComplex_ptr vector @param[in,out] Au magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_4( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex_ptr Au_new, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_ctfqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, Au_new, v, Au ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_5_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex *v, magmaFloatComplex *Au, magmaFloatComplex *u_mp1, magmaFloatComplex *w, magmaFloatComplex *d, magmaFloatComplex *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] sigma magmaFloatComplex scalar @param[in] v magmaFloatComplex_ptr vector @param[in] Au magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in,out] w magmaFloatComplex_ptr vector @param[in,out] d magmaFloatComplex_ptr vector @param[in,out] Ad magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_5( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magmaFloatComplex_ptr u_mp1, magmaFloatComplex_ptr w, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_ctfqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma, v, Au, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; }
9b8408d6db34f488dec8ff31260d7b0b43bd62ce.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmergetfqmr.cu, normal z -> c, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from tfqmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_ctfqmr_1_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex *v, magmaFloatComplex *Au, magmaFloatComplex *u_m, magmaFloatComplex *pu_m, magmaFloatComplex *u_mp1, magmaFloatComplex *w, magmaFloatComplex *d, magmaFloatComplex *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = u_mp1 - alpha*v; w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] sigma magmaFloatComplex scalar @param[in] v magmaFloatComplex_ptr vector @param[in] Au magmaFloatComplex_ptr vector @param[in,out] u_m magmaFloatComplex_ptr vector @param[in,out] pu_m magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in,out] w magmaFloatComplex_ptr vector @param[in,out] d magmaFloatComplex_ptr vector @param[in,out] Ad magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_1( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magmaFloatComplex_ptr u_m, magmaFloatComplex_ptr pu_m, magmaFloatComplex_ptr u_mp1, magmaFloatComplex_ptr w, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_ctfqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma, v, Au, u_m, pu_m, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_2_kernel( int num_rows, int num_cols, magmaFloatComplex eta, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + eta * d r = r - eta * Ad Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta magmaFloatComplex scalar @param[in] d magmaFloatComplex_ptr vector @param[in] Ad magmaFloatComplex_ptr vector @param[in,out] x magmaFloatComplex_ptr vector @param[in,out] r magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_2( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex eta, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magmaFloatComplex_ptr x, magmaFloatComplex_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_ctfqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, d, Ad, x, r ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_3_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex *w, magmaFloatComplex *u_m, magmaFloatComplex *u_mp1 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = w[ i+j*num_rows ] + beta * u_m[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = w + beta*u_mp1 Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] w magmaFloatComplex_ptr vector @param[in] u_m magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_3( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex_ptr w, magmaFloatComplex_ptr u_m, magmaFloatComplex_ptr u_mp1, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_ctfqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, w, u_m, u_mp1 ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_4_kernel( int num_rows, int num_cols, magmaFloatComplex beta, magmaFloatComplex *Au_new, magmaFloatComplex *v, magmaFloatComplex *Au ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ magmaFloatComplex tmp = Au_new[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp + beta * Au[ i+j*num_rows ] + beta * beta * v[ i+j*num_rows ]; Au[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Mergels multiple operations into one kernel: v = Au_new + beta*(Au+beta*v); Au = Au_new Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta magmaFloatComplex scalar @param[in] Au_new magmaFloatComplex_ptr vector @param[in,out] v magmaFloatComplex_ptr vector @param[in,out] Au magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_4( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex beta, magmaFloatComplex_ptr Au_new, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_ctfqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, Au_new, v, Au ); return MAGMA_SUCCESS; } __global__ void magma_ctfqmr_5_kernel( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex *v, magmaFloatComplex *Au, magmaFloatComplex *u_mp1, magmaFloatComplex *w, magmaFloatComplex *d, magmaFloatComplex *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex scalar @param[in] sigma magmaFloatComplex scalar @param[in] v magmaFloatComplex_ptr vector @param[in] Au magmaFloatComplex_ptr vector @param[in,out] u_mp1 magmaFloatComplex_ptr vector @param[in,out] w magmaFloatComplex_ptr vector @param[in,out] d magmaFloatComplex_ptr vector @param[in,out] Ad magmaFloatComplex_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ctfqmr_5( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex alpha, magmaFloatComplex sigma, magmaFloatComplex_ptr v, magmaFloatComplex_ptr Au, magmaFloatComplex_ptr u_mp1, magmaFloatComplex_ptr w, magmaFloatComplex_ptr d, magmaFloatComplex_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_ctfqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma, v, Au, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; }
c3b6f7eb7db79b192aadf964d444ea0c480a6bb1.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "../utils.h" #define eps 1e-10 namespace kaolin { template<typename scalar_t> __global__ void dr_cuda_backword_color_batch( const scalar_t* __restrict__ grad_im_bxhxwxd, const scalar_t* __restrict__ im_bxhxwxd, const int64_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ imwei_bxhxwx3, const scalar_t* __restrict__ points2d_bxfx6, const scalar_t* __restrict__ features_bxfx3d, scalar_t* __restrict__ grad_points2d_bxfx6, scalar_t* __restrict__ grad_features_bxfx3d, int bnum, int height, int width, int fnum, int dnum, int multiplier) { int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidx3 = totalidx1 * 3; const int totalidxd = totalidx1 * dnum; // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); // which face it belongs to? 
int fidxint = imidx_bxhxwx1[totalidx1]; // visible faces if (fidxint >= 0) { const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; const int shift3d = shift1 * 3 * dnum; // the imaging model is: // I(x, y) = w0 * c0 + w1 * c1 + w2 * c2 // gradient of colors // 3 points in one face for (int i = 0; i < 3; i++) { // directly use opengl weights scalar_t w = imwei_bxhxwx3[totalidx3 + i]; int pointshift = shift3d + i * dnum; // rgb value for (int rgb = 0; rgb < dnum; rgb++) { int colorshift = pointshift + rgb; // this should be atomic operation scalar_t * addr = grad_features_bxfx3d + colorshift; scalar_t val = grad_im_bxhxwxd[totalidxd + rgb] * w; atomicAdd(addr, val); } } // gradient of points // here, we calculate dl/dp // dl/dp = dldI * dI/dp // dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp // first // 4 coorinates scalar_t ax = points2d_bxfx6[shift6 + 0]; scalar_t ay = points2d_bxfx6[shift6 + 1]; scalar_t bx = points2d_bxfx6[shift6 + 2]; scalar_t by = points2d_bxfx6[shift6 + 3]; scalar_t cx = points2d_bxfx6[shift6 + 4]; scalar_t cy = points2d_bxfx6[shift6 + 5]; // replace with other variables scalar_t m = bx - ax; scalar_t p = by - ay; scalar_t n = cx - ax; scalar_t q = cy - ay; scalar_t s = x0 - ax; scalar_t t = y0 - ay; // m* w1 + n * w2 = s // p * w1 + q * w2 = t // w1 = (sq - nt) / (mq - np) // w2 = (mt - sp) / (mq - np) scalar_t k1 = s * q - n * t; scalar_t k2 = m * t - s * p; scalar_t k3 = m * q - n * p; scalar_t dk1dm = 0; scalar_t dk1dn = -t; scalar_t dk1dp = 0; scalar_t dk1dq = s; scalar_t dk1ds = q; scalar_t dk1dt = -n; scalar_t dk2dm = t; scalar_t dk2dn = 0; scalar_t dk2dp = -s; scalar_t dk2dq = 0; scalar_t dk2ds = -p; scalar_t dk2dt = m; scalar_t dk3dm = q; scalar_t dk3dn = -p; scalar_t dk3dp = -n; scalar_t dk3dq = m; scalar_t dk3ds = 0; scalar_t dk3dt = 0; // w1 = k1 / k3 // w2 = k2 / k3 // remember we need divide k3 ^ 2 scalar_t dw1dm = dk1dm * k3 - dk3dm * k1; scalar_t dw1dn = dk1dn * k3 - dk3dn * k1; scalar_t dw1dp = dk1dp * k3 - dk3dp * k1; scalar_t dw1dq = dk1dq * k3 - dk3dq * k1; scalar_t dw1ds = dk1ds * k3 - dk3ds * k1; scalar_t dw1dt = dk1dt * k3 - dk3dt * k1; scalar_t dw2dm = dk2dm * k3 - dk3dm * k2; scalar_t dw2dn = dk2dn * k3 - dk3dn * k2; scalar_t dw2dp = dk2dp * k3 - dk3dp * k2; scalar_t dw2dq = dk2dq * k3 - dk3dq * k2; scalar_t dw2ds = dk2ds * k3 - dk3ds * k2; scalar_t dw2dt = dk2dt * k3 - dk3dt * k2; scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds); scalar_t dw1day = -(dw1dp + dw1dq + dw1dt); scalar_t dw1dbx = dw1dm; scalar_t dw1dby = dw1dp; scalar_t dw1dcx = dw1dn; scalar_t dw1dcy = dw1dq; scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds); scalar_t dw2day = -(dw2dp + dw2dq + dw2dt); scalar_t dw2dbx = dw2dm; scalar_t dw2dby = dw2dp; scalar_t dw2dcx = dw2dn; scalar_t dw2dcy = dw2dq; for (int rgb = 0; rgb < dnum; rgb++) { // the same color for 3 points // thus we can simplify it scalar_t c0 = features_bxfx3d[shift3d + rgb]; scalar_t c1 = features_bxfx3d[shift3d + dnum + rgb]; scalar_t c2 = features_bxfx3d[shift3d + dnum + dnum + rgb]; scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax; scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day; scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx; scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby; scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx; scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy; scalar_t dldI = multiplier * grad_im_bxhxwxd[totalidxd + rgb] / (k3 * k3 + eps); atomicAdd(grad_points2d_bxfx6 + shift6 + 0, dldI * dIdax); atomicAdd(grad_points2d_bxfx6 + shift6 
+ 1, dldI * dIday); atomicAdd(grad_points2d_bxfx6 + shift6 + 2, dldI * dIdbx); atomicAdd(grad_points2d_bxfx6 + shift6 + 3, dldI * dIdby); atomicAdd(grad_points2d_bxfx6 + shift6 + 4, dldI * dIdcx); atomicAdd(grad_points2d_bxfx6 + shift6 + 5, dldI * dIdcy); } } } template<typename scalar_t> __global__ void dr_cuda_backword_prob_batch( const scalar_t* __restrict__ grad_improb_bxhxwx1, const scalar_t* __restrict__ improb_bxhxwx1, const int64_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ probface_bxhxwxk, const scalar_t* __restrict__ probcase_bxhxwxk, const scalar_t* __restrict__ probdis_bxhxwxk, const scalar_t* __restrict__ points2d_bxfx6, scalar_t* __restrict__ grad_points2dprob_bxfx6, int bnum, int height, int width, int fnum, int knum, int multiplier, int sigmainv) { int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidxk = totalidx1 * knum; // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); // which face it belongs to? int fidxint = imidx_bxhxwx1[totalidx1]; // not covered by any faces if (fidxint < 0) { scalar_t dLdp = grad_improb_bxhxwx1[totalidx1]; scalar_t allprob = improb_bxhxwx1[totalidx1]; for (int kid = 0; kid < knum; kid++) { scalar_t fidx = probface_bxhxwxk[totalidxk + kid]; // face begins from 1 // convert it into int, use round! int fidxint = static_cast<int>(fidx + 0.5) - 1; if (fidxint < 0) break; const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; scalar_t prob = probdis_bxhxwxk[totalidxk + kid]; scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob) / (1.0 - prob + eps) * prob; scalar_t edgecase = probcase_bxhxwxk[totalidxk + kid]; int edgeid = static_cast<int>(edgecase + 0.5) - 1; if (edgeid >= 3) { // point distance int pshift = shift6 + (edgeid - 3) * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; scalar_t dLdx1 = dLdz * 2 * (x1 - x0); scalar_t dLdy1 = dLdz * 2 * (y1 - y0); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier); } else { // perpendicular distance int pshift = shift6 + edgeid * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; int pshift2 = shift6 + ((edgeid + 1) % 3) * 2; scalar_t x2 = points2d_bxfx6[pshift2 + 0]; scalar_t y2 = points2d_bxfx6[pshift2 + 1]; // ax + by + c = 0 scalar_t A = y2 - y1; scalar_t B = x1 - x2; scalar_t C = x2 * y1 - x1 * y2; // dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2) // up = ax + by + c // down = a^2 + b^2 // dissquare = up^2 / down scalar_t up = A * x0 + B * y0 + C; scalar_t down = A * A + B * B; scalar_t dissquare = up * up / (down + eps); scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + eps); scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + eps); scalar_t dzdC = 2 * up / (down + eps); scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC); scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA); scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB); scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 
1, dLdy1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 0, dLdx2 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 1, dLdy2 / multiplier); } } } return; } void rasterize_backward_cuda_kernel_launcher(at::Tensor grad_image_bxhxwxd, at::Tensor grad_improb_bxhxwx1, at::Tensor image_bxhxwxd, at::Tensor improb_bxhxwx1, at::Tensor imidx_bxhxwx1, at::Tensor imwei_bxhxwx3, at::Tensor probface_bxhxwxk, at::Tensor probcase_bxhxwxk, at::Tensor probdis_bxhxwxk, at::Tensor points2d_bxfx6, at::Tensor colors_bxfx3d, at::Tensor grad_points2d_bxfx6, at::Tensor grad_colors_bxfx3d, at::Tensor grad_points2dprob_bxfx6, int multiplier, int sigmainv) { int bnum = grad_image_bxhxwxd.size(0); int height = grad_image_bxhxwxd.size(1); int width = grad_image_bxhxwxd.size(2); int dnum = grad_image_bxhxwxd.size(3); int fnum = grad_points2d_bxfx6.size(1); int knum = probface_bxhxwxk.size(3); // for bxhxw image size const int threadnum = 512; const int totalthread = bnum * height * width; const int blocknum = totalthread / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks(blocknum, 1, 1); // we exchange block and thread! AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.scalar_type(), "dr_cuda_backward_color_batch", ([&] { hipLaunchKernelGGL(( dr_cuda_backword_color_batch<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_image_bxhxwxd.data_ptr<scalar_t>(), image_bxhxwxd.data_ptr<scalar_t>(), imidx_bxhxwx1.data_ptr<int64_t>(), imwei_bxhxwx3.data_ptr<scalar_t>(), points2d_bxfx6.data_ptr<scalar_t>(), colors_bxfx3d.data_ptr<scalar_t>(), grad_points2d_bxfx6.data_ptr<scalar_t>(), grad_colors_bxfx3d.data_ptr<scalar_t>(), bnum, height, width, fnum, dnum, multiplier); })); AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.scalar_type(), "dr_cuda_backward_prob_batch", ([&] { hipLaunchKernelGGL(( dr_cuda_backword_prob_batch<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_improb_bxhxwx1.data_ptr<scalar_t>(), improb_bxhxwx1.data_ptr<scalar_t>(), imidx_bxhxwx1.data_ptr<int64_t>(), probface_bxhxwxk.data_ptr<scalar_t>(), probcase_bxhxwxk.data_ptr<scalar_t>(), probdis_bxhxwxk.data_ptr<scalar_t>(), points2d_bxfx6.data_ptr<scalar_t>(), grad_points2dprob_bxfx6.data_ptr<scalar_t>(), bnum, height, width, fnum, knum, multiplier, sigmainv); })); } } // namespace kaolin
c3b6f7eb7db79b192aadf964d444ea0c480a6bb1.cu
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #include <cuda.h> #include <cuda_runtime.h> #include "../utils.h" #define eps 1e-10 namespace kaolin { template<typename scalar_t> __global__ void dr_cuda_backword_color_batch( const scalar_t* __restrict__ grad_im_bxhxwxd, const scalar_t* __restrict__ im_bxhxwxd, const int64_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ imwei_bxhxwx3, const scalar_t* __restrict__ points2d_bxfx6, const scalar_t* __restrict__ features_bxfx3d, scalar_t* __restrict__ grad_points2d_bxfx6, scalar_t* __restrict__ grad_features_bxfx3d, int bnum, int height, int width, int fnum, int dnum, int multiplier) { int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidx3 = totalidx1 * 3; const int totalidxd = totalidx1 * dnum; // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); // which face it belongs to? 
int fidxint = imidx_bxhxwx1[totalidx1]; // visible faces if (fidxint >= 0) { const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; const int shift3d = shift1 * 3 * dnum; // the imaging model is: // I(x, y) = w0 * c0 + w1 * c1 + w2 * c2 // gradient of colors // 3 points in one face for (int i = 0; i < 3; i++) { // directly use opengl weights scalar_t w = imwei_bxhxwx3[totalidx3 + i]; int pointshift = shift3d + i * dnum; // rgb value for (int rgb = 0; rgb < dnum; rgb++) { int colorshift = pointshift + rgb; // this should be atomic operation scalar_t * addr = grad_features_bxfx3d + colorshift; scalar_t val = grad_im_bxhxwxd[totalidxd + rgb] * w; atomicAdd(addr, val); } } // gradient of points // here, we calculate dl/dp // dl/dp = dldI * dI/dp // dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp // first // 4 coorinates scalar_t ax = points2d_bxfx6[shift6 + 0]; scalar_t ay = points2d_bxfx6[shift6 + 1]; scalar_t bx = points2d_bxfx6[shift6 + 2]; scalar_t by = points2d_bxfx6[shift6 + 3]; scalar_t cx = points2d_bxfx6[shift6 + 4]; scalar_t cy = points2d_bxfx6[shift6 + 5]; // replace with other variables scalar_t m = bx - ax; scalar_t p = by - ay; scalar_t n = cx - ax; scalar_t q = cy - ay; scalar_t s = x0 - ax; scalar_t t = y0 - ay; // m* w1 + n * w2 = s // p * w1 + q * w2 = t // w1 = (sq - nt) / (mq - np) // w2 = (mt - sp) / (mq - np) scalar_t k1 = s * q - n * t; scalar_t k2 = m * t - s * p; scalar_t k3 = m * q - n * p; scalar_t dk1dm = 0; scalar_t dk1dn = -t; scalar_t dk1dp = 0; scalar_t dk1dq = s; scalar_t dk1ds = q; scalar_t dk1dt = -n; scalar_t dk2dm = t; scalar_t dk2dn = 0; scalar_t dk2dp = -s; scalar_t dk2dq = 0; scalar_t dk2ds = -p; scalar_t dk2dt = m; scalar_t dk3dm = q; scalar_t dk3dn = -p; scalar_t dk3dp = -n; scalar_t dk3dq = m; scalar_t dk3ds = 0; scalar_t dk3dt = 0; // w1 = k1 / k3 // w2 = k2 / k3 // remember we need divide k3 ^ 2 scalar_t dw1dm = dk1dm * k3 - dk3dm * k1; scalar_t dw1dn = dk1dn * k3 - dk3dn * k1; scalar_t dw1dp = dk1dp * k3 - dk3dp * k1; scalar_t dw1dq = dk1dq * k3 - dk3dq * k1; scalar_t dw1ds = dk1ds * k3 - dk3ds * k1; scalar_t dw1dt = dk1dt * k3 - dk3dt * k1; scalar_t dw2dm = dk2dm * k3 - dk3dm * k2; scalar_t dw2dn = dk2dn * k3 - dk3dn * k2; scalar_t dw2dp = dk2dp * k3 - dk3dp * k2; scalar_t dw2dq = dk2dq * k3 - dk3dq * k2; scalar_t dw2ds = dk2ds * k3 - dk3ds * k2; scalar_t dw2dt = dk2dt * k3 - dk3dt * k2; scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds); scalar_t dw1day = -(dw1dp + dw1dq + dw1dt); scalar_t dw1dbx = dw1dm; scalar_t dw1dby = dw1dp; scalar_t dw1dcx = dw1dn; scalar_t dw1dcy = dw1dq; scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds); scalar_t dw2day = -(dw2dp + dw2dq + dw2dt); scalar_t dw2dbx = dw2dm; scalar_t dw2dby = dw2dp; scalar_t dw2dcx = dw2dn; scalar_t dw2dcy = dw2dq; for (int rgb = 0; rgb < dnum; rgb++) { // the same color for 3 points // thus we can simplify it scalar_t c0 = features_bxfx3d[shift3d + rgb]; scalar_t c1 = features_bxfx3d[shift3d + dnum + rgb]; scalar_t c2 = features_bxfx3d[shift3d + dnum + dnum + rgb]; scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax; scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day; scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx; scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby; scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx; scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy; scalar_t dldI = multiplier * grad_im_bxhxwxd[totalidxd + rgb] / (k3 * k3 + eps); atomicAdd(grad_points2d_bxfx6 + shift6 + 0, dldI * dIdax); atomicAdd(grad_points2d_bxfx6 + shift6 
+ 1, dldI * dIday); atomicAdd(grad_points2d_bxfx6 + shift6 + 2, dldI * dIdbx); atomicAdd(grad_points2d_bxfx6 + shift6 + 3, dldI * dIdby); atomicAdd(grad_points2d_bxfx6 + shift6 + 4, dldI * dIdcx); atomicAdd(grad_points2d_bxfx6 + shift6 + 5, dldI * dIdcy); } } } template<typename scalar_t> __global__ void dr_cuda_backword_prob_batch( const scalar_t* __restrict__ grad_improb_bxhxwx1, const scalar_t* __restrict__ improb_bxhxwx1, const int64_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ probface_bxhxwxk, const scalar_t* __restrict__ probcase_bxhxwxk, const scalar_t* __restrict__ probdis_bxhxwxk, const scalar_t* __restrict__ points2d_bxfx6, scalar_t* __restrict__ grad_points2dprob_bxfx6, int bnum, int height, int width, int fnum, int knum, int multiplier, int sigmainv) { int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidxk = totalidx1 * knum; // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); // which face it belongs to? int fidxint = imidx_bxhxwx1[totalidx1]; // not covered by any faces if (fidxint < 0) { scalar_t dLdp = grad_improb_bxhxwx1[totalidx1]; scalar_t allprob = improb_bxhxwx1[totalidx1]; for (int kid = 0; kid < knum; kid++) { scalar_t fidx = probface_bxhxwxk[totalidxk + kid]; // face begins from 1 // convert it into int, use round! int fidxint = static_cast<int>(fidx + 0.5) - 1; if (fidxint < 0) break; const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; scalar_t prob = probdis_bxhxwxk[totalidxk + kid]; scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob) / (1.0 - prob + eps) * prob; scalar_t edgecase = probcase_bxhxwxk[totalidxk + kid]; int edgeid = static_cast<int>(edgecase + 0.5) - 1; if (edgeid >= 3) { // point distance int pshift = shift6 + (edgeid - 3) * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; scalar_t dLdx1 = dLdz * 2 * (x1 - x0); scalar_t dLdy1 = dLdz * 2 * (y1 - y0); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier); } else { // perpendicular distance int pshift = shift6 + edgeid * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; int pshift2 = shift6 + ((edgeid + 1) % 3) * 2; scalar_t x2 = points2d_bxfx6[pshift2 + 0]; scalar_t y2 = points2d_bxfx6[pshift2 + 1]; // ax + by + c = 0 scalar_t A = y2 - y1; scalar_t B = x1 - x2; scalar_t C = x2 * y1 - x1 * y2; // dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2) // up = ax + by + c // down = a^2 + b^2 // dissquare = up^2 / down scalar_t up = A * x0 + B * y0 + C; scalar_t down = A * A + B * B; scalar_t dissquare = up * up / (down + eps); scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + eps); scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + eps); scalar_t dzdC = 2 * up / (down + eps); scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC); scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA); scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB); scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 
1, dLdy1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 0, dLdx2 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 1, dLdy2 / multiplier); } } } return; } void rasterize_backward_cuda_kernel_launcher(at::Tensor grad_image_bxhxwxd, at::Tensor grad_improb_bxhxwx1, at::Tensor image_bxhxwxd, at::Tensor improb_bxhxwx1, at::Tensor imidx_bxhxwx1, at::Tensor imwei_bxhxwx3, at::Tensor probface_bxhxwxk, at::Tensor probcase_bxhxwxk, at::Tensor probdis_bxhxwxk, at::Tensor points2d_bxfx6, at::Tensor colors_bxfx3d, at::Tensor grad_points2d_bxfx6, at::Tensor grad_colors_bxfx3d, at::Tensor grad_points2dprob_bxfx6, int multiplier, int sigmainv) { int bnum = grad_image_bxhxwxd.size(0); int height = grad_image_bxhxwxd.size(1); int width = grad_image_bxhxwxd.size(2); int dnum = grad_image_bxhxwxd.size(3); int fnum = grad_points2d_bxfx6.size(1); int knum = probface_bxhxwxk.size(3); // for bxhxw image size const int threadnum = 512; const int totalthread = bnum * height * width; const int blocknum = totalthread / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks(blocknum, 1, 1); // we exchange block and thread! AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.scalar_type(), "dr_cuda_backward_color_batch", ([&] { dr_cuda_backword_color_batch<scalar_t><<<blocks, threads>>>( grad_image_bxhxwxd.data_ptr<scalar_t>(), image_bxhxwxd.data_ptr<scalar_t>(), imidx_bxhxwx1.data_ptr<int64_t>(), imwei_bxhxwx3.data_ptr<scalar_t>(), points2d_bxfx6.data_ptr<scalar_t>(), colors_bxfx3d.data_ptr<scalar_t>(), grad_points2d_bxfx6.data_ptr<scalar_t>(), grad_colors_bxfx3d.data_ptr<scalar_t>(), bnum, height, width, fnum, dnum, multiplier); })); AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.scalar_type(), "dr_cuda_backward_prob_batch", ([&] { dr_cuda_backword_prob_batch<scalar_t><<<blocks, threads>>>( grad_improb_bxhxwx1.data_ptr<scalar_t>(), improb_bxhxwx1.data_ptr<scalar_t>(), imidx_bxhxwx1.data_ptr<int64_t>(), probface_bxhxwxk.data_ptr<scalar_t>(), probcase_bxhxwxk.data_ptr<scalar_t>(), probdis_bxhxwxk.data_ptr<scalar_t>(), points2d_bxfx6.data_ptr<scalar_t>(), grad_points2dprob_bxfx6.data_ptr<scalar_t>(), bnum, height, width, fnum, knum, multiplier, sigmainv); })); } } // namespace kaolin
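A minimal sketch for orientation (not part of either file above; the kernel and function names are invented for the example). Apart from the headers, the main difference between the .hip and .cu launchers in this pair is the kernel-launch syntax; the indexing, the atomicAdd calls, and the AT_DISPATCH macro carry over unchanged. The same launch written both ways:

#include <cuda_runtime.h>

// Toy kernel standing in for the rasterizer kernels: one element per thread.
__global__ void scale_kernel(float* data, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launch_scale(float* d_data, float s, int n, cudaStream_t stream) {
  const int threads = 512;
  const int blocks = (n + threads - 1) / threads;
  // CUDA form, as in the .cu launcher above:
  scale_kernel<<<blocks, threads, 0, stream>>>(d_data, s, n);
  // The hipified launcher expresses the same call as:
  //   hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, stream,
  //                      d_data, s, n);
}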
47b58b60e14d34c614a212375fb6d77a939251da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define TILE_DIM 16 __global__ void multiMatrix (int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int pos = idx + idy * N; int temp_result = 0; int posa, posb; __shared__ int s_a[TILE_DIM][TILE_DIM]; __shared__ int s_b[TILE_DIM][TILE_DIM]; for (int tile_idx = 0; tile_idx < gridDim.x; tile_idx++) { posa = idy * N + (tile_idx * TILE_DIM + threadIdx.x); posb = (tile_idx * TILE_DIM + threadIdx.y) * N + idx; if (posa < N*N) { s_a[threadIdx.y][threadIdx.x] = a[posa]; } else { s_a[threadIdx.y][threadIdx.x] = 0; } if (posb < N*N) { s_b[threadIdx.y][threadIdx.x] = b[posb]; } else { s_b[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); if (idx < N && idy < N) { for (int i=0; i < TILE_DIM; i++) { temp_result += s_a[threadIdx.y][i] * s_b[i][threadIdx.x]; } } __syncthreads(); } __syncthreads(); if(idx < N && idy < N) { c[pos] = temp_result; } } int main (int argc, char* argv[]){ int N = 4; size_t size = N*N*sizeof(int); int num_thread, num_block; int *h_a, *h_b, *h_c; h_a = (int*)malloc(size); h_b = (int*)malloc(size); h_c = (int*)malloc(size); int *d_a, *d_b, *d_c; hipMalloc(&d_a, size); hipMalloc(&d_b, size); hipMalloc(&d_c, size); int i = 0, j = 0; for (i = 0; i < N*N; i++){ h_a[i] = h_b[i] = i; } hipMemcpy(d_a,h_a,size,hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,size,hipMemcpyHostToDevice); hipMemset(d_c,0,size); num_block = ceil((float)N/TILE_DIM); num_thread = N < TILE_DIM ? N : TILE_DIM; printf("Blocks: %d Threads: %d \n", num_block, num_thread); dim3 gridsize(num_block,num_block,1); dim3 blocksize(num_thread,num_thread,1); hipLaunchKernelGGL(( multiMatrix), dim3(gridsize),dim3(blocksize), 0, 0, d_a, d_b, d_c, N); hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(h_a); free(h_b); free(h_c); }
47b58b60e14d34c614a212375fb6d77a939251da.cu
#include <stdio.h> #define TILE_DIM 16 __global__ void multiMatrix (int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int pos = idx + idy * N; int temp_result = 0; int posa, posb; __shared__ int s_a[TILE_DIM][TILE_DIM]; __shared__ int s_b[TILE_DIM][TILE_DIM]; for (int tile_idx = 0; tile_idx < gridDim.x; tile_idx++) { posa = idy * N + (tile_idx * TILE_DIM + threadIdx.x); posb = (tile_idx * TILE_DIM + threadIdx.y) * N + idx; if (posa < N*N) { s_a[threadIdx.y][threadIdx.x] = a[posa]; } else { s_a[threadIdx.y][threadIdx.x] = 0; } if (posb < N*N) { s_b[threadIdx.y][threadIdx.x] = b[posb]; } else { s_b[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); if (idx < N && idy < N) { for (int i=0; i < TILE_DIM; i++) { temp_result += s_a[threadIdx.y][i] * s_b[i][threadIdx.x]; } } __syncthreads(); } __syncthreads(); if(idx < N && idy < N) { c[pos] = temp_result; } } int main (int argc, char* argv[]){ int N = 4; size_t size = N*N*sizeof(int); int num_thread, num_block; int *h_a, *h_b, *h_c; h_a = (int*)malloc(size); h_b = (int*)malloc(size); h_c = (int*)malloc(size); int *d_a, *d_b, *d_c; cudaMalloc(&d_a, size); cudaMalloc(&d_b, size); cudaMalloc(&d_c, size); int i = 0, j = 0; for (i = 0; i < N*N; i++){ h_a[i] = h_b[i] = i; } cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,size,cudaMemcpyHostToDevice); cudaMemset(d_c,0,size); num_block = ceil((float)N/TILE_DIM); num_thread = N < TILE_DIM ? N : TILE_DIM; printf("Blocks: %d Threads: %d \n", num_block, num_thread); dim3 gridsize(num_block,num_block,1); dim3 blocksize(num_thread,num_thread,1); multiMatrix<<<gridsize,blocksize>>>(d_a, d_b, d_c, N); cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(h_a); free(h_b); free(h_c); }
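An illustrative host-side reference (an addition for this write-up, not found in either file): both versions of multiMatrix compute c = a * b for row-major N x N int matrices, with pos = idx + idy * N addressing row idy and column idx, so the copied-back h_c can be checked against a plain triple loop on small sizes.

// Sequential reference for the tiled kernel above (illustrative helper name).
static void matmul_ref(const int* a, const int* b, int* c, int N) {
  for (int row = 0; row < N; row++) {
    for (int col = 0; col < N; col++) {
      int acc = 0;
      for (int k = 0; k < N; k++) {
        acc += a[row * N + k] * b[k * N + col];
      }
      c[row * N + col] = acc;  // same row-major layout as c[pos] in the kernel
    }
  }
}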
8cbd1ad34a7921d67598d99d4fa820313cb95ab1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include "lib/hpc.h" #define LINE_LENGHT 4000 #define WARP_SIZE 32 #define MAX_DIM 200 /* This function reads the points from a file descriptor and saves * them in the return matrix. Also, it stores the dimension D and * the number of points N onto two int memory locations. */ double* read_points(FILE* fd, int* N, int* D){ char line[LINE_LENGHT]; const size_t BUF_SIZE = sizeof(line); /* Read the dimension: fetch the first line until space */ char* dim; dim = fgets(line, BUF_SIZE, fd); sscanf(dim, "%d", D); /* Read the number of points: fetch the second line until newline */ char* n; n = fgets(line, BUF_SIZE, fd); sscanf(n, "%d", N); /* Allocate the matrix (N x D), where each line i contains the values of the points on that dimension i. */ double *matrix = (double*) malloc((*N) * (*D) * sizeof(double)); char* str; const char* s = " "; char* token; char* ptr; for(int i = 0; i < *N; i++){ /* Read current line */ str = fgets(line, BUF_SIZE, fd); /* Split the string read on s=" " separator and fetch the values */ token = strtok(str, s); for(int k = 0; k < *D && token != NULL; k++){ /* convert ASCII string to doubleing-point number */ matrix[k * (*N) + i] = strtod(token, &ptr); token = strtok(NULL, s); } } return matrix; } /* Returns true if the array s dominates the array d. * Parameters: * - s, d: arrays of double * - length: number of elements of s and d * - offset: distance between two elements that we must read in array s */ __device__ bool dominance(double *s, double *d, int length, int offset){ bool strictly_major = false; for(int i = 0; i < length; i++){ if(s[i * offset] < d[i]){ return false; } if(s[i * offset] > d[i]){ strictly_major = true; } } return strictly_major; } /* * This function prints to the file descriptor fd given as parameter: * - The dimension D of the points; * - The cardinality K of the Skyline set; * - The Skyline set. */ __host__ void print_skyline(FILE* fd, bool *S, double *points, int N, int D, int K){ int i, j; /* Print D, K */ fprintf(fd, "%d\n%d\n", D, K); /* Print the Skyline set */ for(i = 0; i < N; i++){ if(S[i]){ for(j = 0; j < D; j++){ fprintf(fd, "%f ", points[j * N + i]); } fprintf(fd, "\n"); } } } /* Kernel function: * each thread has the purpose to determine if the number in charge is in * the Skyline set. To do so, this function iterates on all the points and stops * if any of them dominates it. * The result, in the end, is put in the array S, stored in the global memory. 
*/ __global__ void compute_skyline(double *points, bool *S, int *k, int n, int d){ const int y = blockIdx.y * blockDim.y + threadIdx.y; if(y < n){ /* Copy the number in charge to the local memory in order to perform coalesced memory accesses */ double num[MAX_DIM]; for(int i = 0; i < d; i++){ num[i] = points[i * n + y]; } int is_skyline_point = true; for(int i = 0; i < n && is_skyline_point; i++){ /* If num is dominates by another number then it is not in the Skyline set */ if(i != y){ if(dominance(&points[i], num, d, n)){ is_skyline_point = false; } } } /* Copy the results on the device global memory */ S[y] = is_skyline_point; atomicAdd(k, is_skyline_point); } } int main(int argc, char* argv[]){ /* Allocate memory to store the number of points, them dimension and the points */ int* D = (int*) malloc(sizeof(int)); int* N = (int*) malloc(sizeof(int)); double* points = read_points(stdin, N, D); /* - Define the matrix dimension, - Allocate space on the device global memory - Copy the array points on the allocated space */ const size_t size = (*N) * (*D) * sizeof(double); double* d_points; cudaSafeCall(hipMalloc((void**)&d_points, size)); cudaSafeCall(hipMemcpy(d_points, points, size, hipMemcpyHostToDevice)); /* Allocate space where the kernel function will store the result */ bool *S, *d_S; cudaSafeCall(hipMalloc((void**)&d_S, (*N) * sizeof(bool))); /* Allocate space in order to store the cardinality of the Skyline set */ int *K, *d_K; K = (int*) malloc(sizeof(int)); *K = 0; cudaSafeCall(hipMalloc((void**)&d_K, sizeof(int))); cudaSafeCall(hipMemcpy(d_K, K, sizeof(int), hipMemcpyHostToDevice)); /* Define the block and grid dimensions */ dim3 block(1, WARP_SIZE * 2); dim3 grid(1, ((*N) + WARP_SIZE * 2 - 1)/(WARP_SIZE * 2)); hipEvent_t t_kernel_start, t_kernel_stop; hipEventCreate(&t_kernel_start); hipEventCreate(&t_kernel_stop); hipEventRecord(t_kernel_start); /* Kernel function call to determine the Skyline set */ hipLaunchKernelGGL(( compute_skyline), dim3(grid), dim3(block), 0, 0, d_points, d_S, d_K, *N, *D); hipEventRecord(t_kernel_stop); /* Wait the Kernel to finish and check errors */ cudaCheckError(); /* While Kernel function is executing on device, allocate memory on heap * in order to store the result */ S = (bool*) malloc((*N) * sizeof(bool)); /* - Copy the result from device memory to host's - Copy the Skyline cardinality from device to host memory - Print the points in the Skyline set */ cudaSafeCall(hipMemcpy(S, d_S, (*N) * sizeof(bool), hipMemcpyDeviceToHost)); cudaSafeCall(hipMemcpy(K, d_K, sizeof(int), hipMemcpyDeviceToHost)); print_skyline(stdout, S, points, *N, *D, *K); /* Free space on device and host heap memory */ hipFree(d_points); hipFree(d_K); free(points); free(S); free(D); free(N); free(K); float milliseconds = 0; hipEventElapsedTime(&milliseconds, t_kernel_start, t_kernel_stop); fprintf(stdout, "%f\n", milliseconds / 1000); return 0; }
8cbd1ad34a7921d67598d99d4fa820313cb95ab1.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include "lib/hpc.h" #define LINE_LENGHT 4000 #define WARP_SIZE 32 #define MAX_DIM 200 /* This function reads the points from a file descriptor and saves * them in the return matrix. Also, it stores the dimension D and * the number of points N onto two int memory locations. */ double* read_points(FILE* fd, int* N, int* D){ char line[LINE_LENGHT]; const size_t BUF_SIZE = sizeof(line); /* Read the dimension: fetch the first line until space */ char* dim; dim = fgets(line, BUF_SIZE, fd); sscanf(dim, "%d", D); /* Read the number of points: fetch the second line until newline */ char* n; n = fgets(line, BUF_SIZE, fd); sscanf(n, "%d", N); /* Allocate the matrix (N x D), where each line i contains the values of the points on that dimension i. */ double *matrix = (double*) malloc((*N) * (*D) * sizeof(double)); char* str; const char* s = " "; char* token; char* ptr; for(int i = 0; i < *N; i++){ /* Read current line */ str = fgets(line, BUF_SIZE, fd); /* Split the string read on s=" " separator and fetch the values */ token = strtok(str, s); for(int k = 0; k < *D && token != NULL; k++){ /* convert ASCII string to doubleing-point number */ matrix[k * (*N) + i] = strtod(token, &ptr); token = strtok(NULL, s); } } return matrix; } /* Returns true if the array s dominates the array d. * Parameters: * - s, d: arrays of double * - length: number of elements of s and d * - offset: distance between two elements that we must read in array s */ __device__ bool dominance(double *s, double *d, int length, int offset){ bool strictly_major = false; for(int i = 0; i < length; i++){ if(s[i * offset] < d[i]){ return false; } if(s[i * offset] > d[i]){ strictly_major = true; } } return strictly_major; } /* * This function prints to the file descriptor fd given as parameter: * - The dimension D of the points; * - The cardinality K of the Skyline set; * - The Skyline set. */ __host__ void print_skyline(FILE* fd, bool *S, double *points, int N, int D, int K){ int i, j; /* Print D, K */ fprintf(fd, "%d\n%d\n", D, K); /* Print the Skyline set */ for(i = 0; i < N; i++){ if(S[i]){ for(j = 0; j < D; j++){ fprintf(fd, "%f ", points[j * N + i]); } fprintf(fd, "\n"); } } } /* Kernel function: * each thread has the purpose to determine if the number in charge is in * the Skyline set. To do so, this function iterates on all the points and stops * if any of them dominates it. * The result, in the end, is put in the array S, stored in the global memory. 
*/ __global__ void compute_skyline(double *points, bool *S, int *k, int n, int d){ const int y = blockIdx.y * blockDim.y + threadIdx.y; if(y < n){ /* Copy the number in charge to the local memory in order to perform coalesced memory accesses */ double num[MAX_DIM]; for(int i = 0; i < d; i++){ num[i] = points[i * n + y]; } int is_skyline_point = true; for(int i = 0; i < n && is_skyline_point; i++){ /* If num is dominates by another number then it is not in the Skyline set */ if(i != y){ if(dominance(&points[i], num, d, n)){ is_skyline_point = false; } } } /* Copy the results on the device global memory */ S[y] = is_skyline_point; atomicAdd(k, is_skyline_point); } } int main(int argc, char* argv[]){ /* Allocate memory to store the number of points, them dimension and the points */ int* D = (int*) malloc(sizeof(int)); int* N = (int*) malloc(sizeof(int)); double* points = read_points(stdin, N, D); /* - Define the matrix dimension, - Allocate space on the device global memory - Copy the array points on the allocated space */ const size_t size = (*N) * (*D) * sizeof(double); double* d_points; cudaSafeCall(cudaMalloc((void**)&d_points, size)); cudaSafeCall(cudaMemcpy(d_points, points, size, cudaMemcpyHostToDevice)); /* Allocate space where the kernel function will store the result */ bool *S, *d_S; cudaSafeCall(cudaMalloc((void**)&d_S, (*N) * sizeof(bool))); /* Allocate space in order to store the cardinality of the Skyline set */ int *K, *d_K; K = (int*) malloc(sizeof(int)); *K = 0; cudaSafeCall(cudaMalloc((void**)&d_K, sizeof(int))); cudaSafeCall(cudaMemcpy(d_K, K, sizeof(int), cudaMemcpyHostToDevice)); /* Define the block and grid dimensions */ dim3 block(1, WARP_SIZE * 2); dim3 grid(1, ((*N) + WARP_SIZE * 2 - 1)/(WARP_SIZE * 2)); cudaEvent_t t_kernel_start, t_kernel_stop; cudaEventCreate(&t_kernel_start); cudaEventCreate(&t_kernel_stop); cudaEventRecord(t_kernel_start); /* Kernel function call to determine the Skyline set */ compute_skyline<<<grid, block>>>(d_points, d_S, d_K, *N, *D); cudaEventRecord(t_kernel_stop); /* Wait the Kernel to finish and check errors */ cudaCheckError(); /* While Kernel function is executing on device, allocate memory on heap * in order to store the result */ S = (bool*) malloc((*N) * sizeof(bool)); /* - Copy the result from device memory to host's - Copy the Skyline cardinality from device to host memory - Print the points in the Skyline set */ cudaSafeCall(cudaMemcpy(S, d_S, (*N) * sizeof(bool), cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(K, d_K, sizeof(int), cudaMemcpyDeviceToHost)); print_skyline(stdout, S, points, *N, *D, *K); /* Free space on device and host heap memory */ cudaFree(d_points); cudaFree(d_K); free(points); free(S); free(D); free(N); free(K); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, t_kernel_start, t_kernel_stop); fprintf(stdout, "%f\n", milliseconds / 1000); return 0; }
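A sequential reference for the skyline pair (a sketch added here, not present in either file; it assumes the same column-major layout where points[k * N + i] is coordinate k of point i). Point j dominates point i when it is greater or equal in every dimension and strictly greater in at least one, exactly as the device dominance() checks, so S and the cardinality K can be validated on small inputs:

#include <stdbool.h>

// Marks skyline membership in S and returns the cardinality K (illustrative helper).
static int skyline_ref(const double* points, bool* S, int N, int D) {
  int K = 0;
  for (int i = 0; i < N; i++) {
    bool dominated = false;
    for (int j = 0; j < N && !dominated; j++) {
      if (j == i) continue;
      bool ge_all = true, gt_any = false;
      for (int k = 0; k < D; k++) {
        double pj = points[k * N + j], pi = points[k * N + i];
        if (pj < pi) { ge_all = false; break; }
        if (pj > pi) gt_any = true;
      }
      dominated = ge_all && gt_any;
    }
    S[i] = !dominated;
    if (S[i]) K++;
  }
  return K;
}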
92b50bd383fab3a2747e501cc39e9bef4e2e04dc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "detail/nbr_sampling_utils.cuh" #include <cugraph/graph_functions.hpp> #include <gtest/gtest.h> #include <thrust/distance.h> #include <thrust/sort.h> #include <thrust/unique.h> struct Uniform_Neighbor_Sampling_Usecase { std::vector<int32_t> fanout{{-1}}; int32_t batch_size{10}; bool check_correctness{true}; bool flag_replacement{true}; }; template <typename input_usecase_t> class Tests_Uniform_Neighbor_Sampling : public ::testing::TestWithParam< std::tuple<Uniform_Neighbor_Sampling_Usecase, input_usecase_t>> { public: Tests_Uniform_Neighbor_Sampling() {} static void SetUpTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t> void run_current_test(Uniform_Neighbor_Sampling_Usecase const& uniform_neighbor_sampling_usecase, input_usecase_t const& input_usecase) { raft::handle_t handle{}; HighResTimer hr_timer{}; if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement hr_timer.start("Construct graph"); } auto [graph, edge_weights, renumber_map_labels] = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, false>( handle, input_usecase, true, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement hr_timer.stop(); hr_timer.display_and_clear(std::cout); } auto graph_view = graph.view(); auto edge_weight_view = edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt; // // Test is designed like GNN sampling. We'll select 90% of vertices // to be included in sampling batches // constexpr float select_probability{0.9}; // FIXME: Update the tests to initialize RngState and use it instead // of seed... 
constexpr uint64_t seed{0}; raft::random::RngState rng_state(seed); auto random_sources = cugraph::select_random_vertices( handle, graph_view, std::optional<raft::device_span<vertex_t const>>{std::nullopt}, rng_state, ::max(static_cast<size_t>(graph_view.number_of_vertices() * select_probability), ::min(static_cast<size_t>(graph_view.number_of_vertices()), size_t{1})), false, false); // // Now we'll assign the vertices to batches // rmm::device_uvector<float> random_numbers(random_sources.size(), handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), random_numbers.data(), random_numbers.size(), float{0}, float{1}, rng_state); thrust::sort_by_key(handle.get_thrust_policy(), random_numbers.begin(), random_numbers.end(), random_sources.begin()); random_numbers.resize(0, handle.get_stream()); random_numbers.shrink_to_fit(handle.get_stream()); auto batch_number = std::make_optional<rmm::device_uvector<int32_t>>(random_sources.size(), handle.get_stream()); thrust::tabulate(handle.get_thrust_policy(), batch_number->begin(), batch_number->end(), [batch_size = uniform_neighbor_sampling_usecase.batch_size] __device__( int32_t index) { return index / batch_size; }); rmm::device_uvector<vertex_t> random_sources_copy(random_sources.size(), handle.get_stream()); raft::copy(random_sources_copy.data(), random_sources.data(), random_sources.size(), handle.get_stream()); std::optional<std::tuple<raft::device_span<int32_t const>, raft::device_span<int32_t const>>> label_to_output_comm_rank_mapping{std::nullopt}; #ifdef NO_CUGRAPH_OPS EXPECT_THROW( cugraph::uniform_neighbor_sample( handle, graph_view, edge_weight_view, std::optional<cugraph::edge_property_view_t<edge_t, edge_t const*>>{std::nullopt}, std::optional<cugraph::edge_property_view_t<edge_t, int32_t const*>>{std::nullopt}, raft::device_span<vertex_t const>{random_sources_copy.data(), random_sources.size()}, batch_number ? std::make_optional(raft::device_span<int32_t const>{batch_number->data(), batch_number->size()}) : std::nullopt, label_to_output_comm_rank_mapping, raft::host_span<int32_t const>(uniform_neighbor_sampling_usecase.fanout.data(), uniform_neighbor_sampling_usecase.fanout.size()), rng_state, true, uniform_neighbor_sampling_usecase.flag_replacement), std::exception); #else auto&& [src_out, dst_out, wgt_out, edge_id, edge_type, hop, labels, offsets] = cugraph::uniform_neighbor_sample( handle, graph_view, edge_weight_view, std::optional<cugraph::edge_property_view_t<edge_t, edge_t const*>>{std::nullopt}, std::optional<cugraph::edge_property_view_t<edge_t, int32_t const*>>{std::nullopt}, raft::device_span<vertex_t const>{random_sources_copy.data(), random_sources.size()}, batch_number ? 
std::make_optional(raft::device_span<int32_t const>{batch_number->data(), batch_number->size()}) : std::nullopt, label_to_output_comm_rank_mapping, raft::host_span<int32_t const>(uniform_neighbor_sampling_usecase.fanout.data(), uniform_neighbor_sampling_usecase.fanout.size()), rng_state, true, uniform_neighbor_sampling_usecase.flag_replacement); if (uniform_neighbor_sampling_usecase.check_correctness) { // First validate that the extracted edges are actually a subset of the // edges in the input graph rmm::device_uvector<vertex_t> vertices(2 * src_out.size(), handle.get_stream()); raft::copy(vertices.data(), src_out.data(), src_out.size(), handle.get_stream()); raft::copy( vertices.data() + src_out.size(), dst_out.data(), dst_out.size(), handle.get_stream()); thrust::sort(handle.get_thrust_policy(), vertices.begin(), vertices.end()); auto vertices_end = thrust::unique(handle.get_thrust_policy(), vertices.begin(), vertices.end()); vertices.resize(thrust::distance(vertices.begin(), vertices_end), handle.get_stream()); rmm::device_uvector<size_t> d_subgraph_offsets(2, handle.get_stream()); std::vector<size_t> h_subgraph_offsets({0, vertices.size()}); raft::update_device(d_subgraph_offsets.data(), h_subgraph_offsets.data(), h_subgraph_offsets.size(), handle.get_stream()); rmm::device_uvector<vertex_t> src_compare(0, handle.get_stream()); rmm::device_uvector<vertex_t> dst_compare(0, handle.get_stream()); std::optional<rmm::device_uvector<weight_t>> wgt_compare{std::nullopt}; std::tie(src_compare, dst_compare, wgt_compare, std::ignore) = extract_induced_subgraphs( handle, graph_view, edge_weight_view, raft::device_span<size_t const>(d_subgraph_offsets.data(), 2), raft::device_span<vertex_t const>(vertices.data(), vertices.size()), true); cugraph::test::validate_extracted_graph_is_subgraph( handle, src_compare, dst_compare, wgt_compare, src_out, dst_out, wgt_out); if (random_sources.size() < 100) { // This validation is too expensive for large number of vertices cugraph::test::validate_sampling_depth(handle, std::move(src_out), std::move(dst_out), std::move(wgt_out), std::move(random_sources), uniform_neighbor_sampling_usecase.fanout.size()); } } #endif } }; using Tests_Uniform_Neighbor_Sampling_File = Tests_Uniform_Neighbor_Sampling<cugraph::test::File_Usecase>; using Tests_Uniform_Neighbor_Sampling_Rmat = Tests_Uniform_Neighbor_Sampling<cugraph::test::Rmat_Usecase>; TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_Uniform_Neighbor_Sampling_File, 
::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 100, true, true}, Uniform_Neighbor_Sampling_Usecase{{2}, 100, true, false}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_Uniform_Neighbor_Sampling_Rmat, ::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 10, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false, 0)))); INSTANTIATE_TEST_SUITE_P( rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with --gtest_filter to select only the rmat_benchmark_test with a specific vertex & edge type combination) by command line arguments and do not include more than one Rmat_Usecase that differ only in scale or edge factor (to avoid running same benchmarks more than once) */ Tests_Uniform_Neighbor_Sampling_Rmat, ::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 500, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false, 0)))); CUGRAPH_TEST_PROGRAM_MAIN()
92b50bd383fab3a2747e501cc39e9bef4e2e04dc.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "detail/nbr_sampling_utils.cuh" #include <cugraph/graph_functions.hpp> #include <gtest/gtest.h> #include <thrust/distance.h> #include <thrust/sort.h> #include <thrust/unique.h> struct Uniform_Neighbor_Sampling_Usecase { std::vector<int32_t> fanout{{-1}}; int32_t batch_size{10}; bool check_correctness{true}; bool flag_replacement{true}; }; template <typename input_usecase_t> class Tests_Uniform_Neighbor_Sampling : public ::testing::TestWithParam< std::tuple<Uniform_Neighbor_Sampling_Usecase, input_usecase_t>> { public: Tests_Uniform_Neighbor_Sampling() {} static void SetUpTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t, typename edge_t, typename weight_t> void run_current_test(Uniform_Neighbor_Sampling_Usecase const& uniform_neighbor_sampling_usecase, input_usecase_t const& input_usecase) { raft::handle_t handle{}; HighResTimer hr_timer{}; if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement hr_timer.start("Construct graph"); } auto [graph, edge_weights, renumber_map_labels] = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, false>( handle, input_usecase, true, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement hr_timer.stop(); hr_timer.display_and_clear(std::cout); } auto graph_view = graph.view(); auto edge_weight_view = edge_weights ? std::make_optional((*edge_weights).view()) : std::nullopt; // // Test is designed like GNN sampling. We'll select 90% of vertices // to be included in sampling batches // constexpr float select_probability{0.9}; // FIXME: Update the tests to initialize RngState and use it instead // of seed... 
constexpr uint64_t seed{0}; raft::random::RngState rng_state(seed); auto random_sources = cugraph::select_random_vertices( handle, graph_view, std::optional<raft::device_span<vertex_t const>>{std::nullopt}, rng_state, std::max(static_cast<size_t>(graph_view.number_of_vertices() * select_probability), std::min(static_cast<size_t>(graph_view.number_of_vertices()), size_t{1})), false, false); // // Now we'll assign the vertices to batches // rmm::device_uvector<float> random_numbers(random_sources.size(), handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), random_numbers.data(), random_numbers.size(), float{0}, float{1}, rng_state); thrust::sort_by_key(handle.get_thrust_policy(), random_numbers.begin(), random_numbers.end(), random_sources.begin()); random_numbers.resize(0, handle.get_stream()); random_numbers.shrink_to_fit(handle.get_stream()); auto batch_number = std::make_optional<rmm::device_uvector<int32_t>>(random_sources.size(), handle.get_stream()); thrust::tabulate(handle.get_thrust_policy(), batch_number->begin(), batch_number->end(), [batch_size = uniform_neighbor_sampling_usecase.batch_size] __device__( int32_t index) { return index / batch_size; }); rmm::device_uvector<vertex_t> random_sources_copy(random_sources.size(), handle.get_stream()); raft::copy(random_sources_copy.data(), random_sources.data(), random_sources.size(), handle.get_stream()); std::optional<std::tuple<raft::device_span<int32_t const>, raft::device_span<int32_t const>>> label_to_output_comm_rank_mapping{std::nullopt}; #ifdef NO_CUGRAPH_OPS EXPECT_THROW( cugraph::uniform_neighbor_sample( handle, graph_view, edge_weight_view, std::optional<cugraph::edge_property_view_t<edge_t, edge_t const*>>{std::nullopt}, std::optional<cugraph::edge_property_view_t<edge_t, int32_t const*>>{std::nullopt}, raft::device_span<vertex_t const>{random_sources_copy.data(), random_sources.size()}, batch_number ? std::make_optional(raft::device_span<int32_t const>{batch_number->data(), batch_number->size()}) : std::nullopt, label_to_output_comm_rank_mapping, raft::host_span<int32_t const>(uniform_neighbor_sampling_usecase.fanout.data(), uniform_neighbor_sampling_usecase.fanout.size()), rng_state, true, uniform_neighbor_sampling_usecase.flag_replacement), std::exception); #else auto&& [src_out, dst_out, wgt_out, edge_id, edge_type, hop, labels, offsets] = cugraph::uniform_neighbor_sample( handle, graph_view, edge_weight_view, std::optional<cugraph::edge_property_view_t<edge_t, edge_t const*>>{std::nullopt}, std::optional<cugraph::edge_property_view_t<edge_t, int32_t const*>>{std::nullopt}, raft::device_span<vertex_t const>{random_sources_copy.data(), random_sources.size()}, batch_number ? 
std::make_optional(raft::device_span<int32_t const>{batch_number->data(), batch_number->size()}) : std::nullopt, label_to_output_comm_rank_mapping, raft::host_span<int32_t const>(uniform_neighbor_sampling_usecase.fanout.data(), uniform_neighbor_sampling_usecase.fanout.size()), rng_state, true, uniform_neighbor_sampling_usecase.flag_replacement); if (uniform_neighbor_sampling_usecase.check_correctness) { // First validate that the extracted edges are actually a subset of the // edges in the input graph rmm::device_uvector<vertex_t> vertices(2 * src_out.size(), handle.get_stream()); raft::copy(vertices.data(), src_out.data(), src_out.size(), handle.get_stream()); raft::copy( vertices.data() + src_out.size(), dst_out.data(), dst_out.size(), handle.get_stream()); thrust::sort(handle.get_thrust_policy(), vertices.begin(), vertices.end()); auto vertices_end = thrust::unique(handle.get_thrust_policy(), vertices.begin(), vertices.end()); vertices.resize(thrust::distance(vertices.begin(), vertices_end), handle.get_stream()); rmm::device_uvector<size_t> d_subgraph_offsets(2, handle.get_stream()); std::vector<size_t> h_subgraph_offsets({0, vertices.size()}); raft::update_device(d_subgraph_offsets.data(), h_subgraph_offsets.data(), h_subgraph_offsets.size(), handle.get_stream()); rmm::device_uvector<vertex_t> src_compare(0, handle.get_stream()); rmm::device_uvector<vertex_t> dst_compare(0, handle.get_stream()); std::optional<rmm::device_uvector<weight_t>> wgt_compare{std::nullopt}; std::tie(src_compare, dst_compare, wgt_compare, std::ignore) = extract_induced_subgraphs( handle, graph_view, edge_weight_view, raft::device_span<size_t const>(d_subgraph_offsets.data(), 2), raft::device_span<vertex_t const>(vertices.data(), vertices.size()), true); cugraph::test::validate_extracted_graph_is_subgraph( handle, src_compare, dst_compare, wgt_compare, src_out, dst_out, wgt_out); if (random_sources.size() < 100) { // This validation is too expensive for large number of vertices cugraph::test::validate_sampling_depth(handle, std::move(src_out), std::move(dst_out), std::move(wgt_out), std::move(random_sources), uniform_neighbor_sampling_usecase.fanout.size()); } } #endif } }; using Tests_Uniform_Neighbor_Sampling_File = Tests_Uniform_Neighbor_Sampling<cugraph::test::File_Usecase>; using Tests_Uniform_Neighbor_Sampling_Rmat = Tests_Uniform_Neighbor_Sampling<cugraph::test::Rmat_Usecase>; TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_File, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_Uniform_Neighbor_Sampling_Rmat, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float>(std::get<0>(param), std::get<1>(param)); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_Uniform_Neighbor_Sampling_File, 
::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 100, true, true}, Uniform_Neighbor_Sampling_Usecase{{2}, 100, true, false}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_Uniform_Neighbor_Sampling_Rmat, ::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 10, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false, 0)))); INSTANTIATE_TEST_SUITE_P( rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with --gtest_filter to select only the rmat_benchmark_test with a specific vertex & edge type combination) by command line arguments and do not include more than one Rmat_Usecase that differ only in scale or edge factor (to avoid running same benchmarks more than once) */ Tests_Uniform_Neighbor_Sampling_Rmat, ::testing::Combine( ::testing::Values(Uniform_Neighbor_Sampling_Usecase{{2}, 500, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false, 0)))); CUGRAPH_TEST_PROGRAM_MAIN()
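A small aside on the seed-vertex count used by this test (a sketch, not part of either file; the helper name is invented). Both variants take 90% of the vertex count and clamp it to at least one seed; in the hipified copy the std:: qualifiers on max and min appear as bare ::max and ::min, but the value being computed is the same:

#include <algorithm>
#include <cstddef>

// Number of vertices handed to select_random_vertices in the test above.
static size_t seed_count(size_t num_vertices, float select_probability) {
  return std::max(static_cast<size_t>(num_vertices * select_probability),
                  std::min(num_vertices, size_t{1}));
}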
3d5b558148a8750fb7e36704941b83184e887954.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/fake_quantize_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { template <typename T> __global__ void FindAbsMaxKernel(const T* in, const int n, T* out) { int bid = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; extern __shared__ T shared_max_data[]; if (gridDim.x > 1) { shared_max_data[tid] = T(0); for (int i = bid; i < n; i += blockDim.x * gridDim.x) { T tmp = fabs(in[i]); if (tmp > shared_max_data[tid]) { shared_max_data[tid] = tmp; } } } else { if (bid < n) { shared_max_data[tid] = fabs(in[bid]); } else { shared_max_data[tid] = T(0); } } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) { shared_max_data[tid] = shared_max_data[tid + i]; } __syncthreads(); } if (tid == 0) { out[blockIdx.x] = shared_max_data[0]; } } template <typename T> struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& ctx, const T* in, const int num, T* out) { int block = 1024; int grid = (block - 1 + num) / block; grid = (grid > block) ? block : grid; framework::Tensor max; T* max_data = max.mutable_data<T>(framework::make_ddim({grid}), ctx.GetPlace()); hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(grid), dim3(block), 1024 * sizeof(T), ctx.stream(), in, num, max_data); hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(1), dim3(block), 1024 * sizeof(T), ctx.stream(), max_data, grid, out); } }; template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>; template <typename T> __global__ void ClipAndQuantKernel(const T* in, const T* scale, const int bin_cnt, const int n, T* out) { int bid = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; T s = scale[0]; for (int i = bid; i < n; i += blockDim.x * gridDim.x) { T x = in[bid]; T v = x > s ? s : x; v = v < -s ? -s : v; v = bin_cnt / s * v; out[bid] = round(v); } } template <typename T> __global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale, const T* last_scale, const int64_t* iter, const int window_size, T* scale_arr, T* out_scale, int* need_find_max, int* out_size) { int it = iter[0]; int idx = it % window_size; T removed = scale_arr[idx]; T cur = cur_scale[0]; scale_arr[idx] = cur; T max = last_scale[0]; out_scale[0] = max < cur ? cur : max; if (fabs(removed - max) < 1e-6) { need_find_max[0] = 1; out_size[0] = it > window_size ? 
window_size : it; } else { need_find_max[0] = 0; } } template <typename T> struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& cur_scale, const framework::Tensor& last_scale, const framework::Tensor& iter, const int window_size, framework::Tensor* scales_arr, framework::Tensor* out_scale) { const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); T* scale_arr = scales_arr->mutable_data<T>(gpu_place); T* out_scale_data = out_scale->mutable_data<T>(gpu_place); framework::Tensor need_find_max, out_size; int* find_max = need_find_max.mutable_data<int>(gpu_place); int* out_size_data = out_size.mutable_data<int>(gpu_place); hipLaunchKernelGGL(( FindRangeAbsMaxAndFillArray<T>), dim3(1), dim3(1), 0, ctx.stream(), cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(), window_size, scale_arr, out_scale_data, find_max, out_size_data); int g_find_max; memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max, sizeof(int), 0); if (g_find_max) { int len; memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data, sizeof(int), 0); FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len, out_scale_data); } } }; template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>; template <typename T> struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& in_accum, const framework::Tensor& in_state, const T* cur_scale, const float rate, framework::Tensor* out_state, framework::Tensor* out_accum, framework::Tensor* out_scale) { const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace()); T accum; memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(), sizeof(T), 0); T state; memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(), sizeof(T), 0); T scale; memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T), 0); state = rate * state + 1; accum = rate * accum + scale; scale = accum / state; memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place), platform::CPUPlace(), &accum, sizeof(T), 0); memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place), platform::CPUPlace(), &state, sizeof(T), 0); memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place), platform::CPUPlace(), &scale, sizeof(T), 0); } }; template struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, float>; template <typename T> struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& in, const framework::Tensor& scale, const int bin_cnt, framework::Tensor* out) { int num = in.numel(); int block = 1024; int grid = (block - 1 + num) / block; const T* in_data = in.data<T>(); const T* scale_data = scale.data<T>(); T* out_data = out->mutable_data<T>(ctx.GetPlace()); hipLaunchKernelGGL(( ClipAndQuantKernel<T>), dim3(grid), dim3(block), 0, ctx.stream(), in_data, scale_data, bin_cnt, num, out_data); } }; template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max, ops::FakeQuantizeAbsMaxKernel<CUDA, float>); REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max, ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>); 
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max, ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>); REGISTER_OP_CUDA_KERNEL( fake_quantize_moving_average_abs_max, ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
3d5b558148a8750fb7e36704941b83184e887954.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fake_quantize_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"

namespace paddle {
namespace operators {

template <typename T>
__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) {
  int bid = threadIdx.x + blockIdx.x * blockDim.x;
  int tid = threadIdx.x;

  extern __shared__ T shared_max_data[];
  if (gridDim.x > 1) {
    shared_max_data[tid] = T(0);
    for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
      T tmp = fabs(in[i]);
      if (tmp > shared_max_data[tid]) {
        shared_max_data[tid] = tmp;
      }
    }
  } else {
    if (bid < n) {
      shared_max_data[tid] = fabs(in[bid]);
    } else {
      shared_max_data[tid] = T(0);
    }
  }
  __syncthreads();

  for (int i = blockDim.x / 2; i > 0; i >>= 1) {
    if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
      shared_max_data[tid] = shared_max_data[tid + i];
    }
    __syncthreads();
  }
  if (tid == 0) {
    out[blockIdx.x] = shared_max_data[0];
  }
}

template <typename T>
struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx, const T* in,
                  const int num, T* out) {
    int block = 1024;
    int grid = (block - 1 + num) / block;
    grid = (grid > block) ? block : grid;

    framework::Tensor max;
    T* max_data =
        max.mutable_data<T>(framework::make_ddim({grid}), ctx.GetPlace());
    FindAbsMaxKernel<T><<<grid, block, 1024 * sizeof(T), ctx.stream()>>>(
        in, num, max_data);
    FindAbsMaxKernel<T><<<1, block, 1024 * sizeof(T), ctx.stream()>>>(
        max_data, grid, out);
  }
};

template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>;

template <typename T>
__global__ void ClipAndQuantKernel(const T* in, const T* scale,
                                   const int bin_cnt, const int n, T* out) {
  int bid = threadIdx.x + blockIdx.x * blockDim.x;
  int tid = threadIdx.x;

  T s = scale[0];
  for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
    T x = in[bid];
    T v = x > s ? s : x;
    v = v < -s ? -s : v;
    v = bin_cnt / s * v;
    out[bid] = round(v);
  }
}

template <typename T>
__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale,
                                            const T* last_scale,
                                            const int64_t* iter,
                                            const int window_size, T* scale_arr,
                                            T* out_scale, int* need_find_max,
                                            int* out_size) {
  int it = iter[0];
  int idx = it % window_size;
  T removed = scale_arr[idx];
  T cur = cur_scale[0];
  scale_arr[idx] = cur;
  T max = last_scale[0];
  out_scale[0] = max < cur ? cur : max;
  if (fabs(removed - max) < 1e-6) {
    need_find_max[0] = 1;
    out_size[0] = it > window_size ? window_size : it;
  } else {
    need_find_max[0] = 0;
  }
}

template <typename T>
struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& cur_scale,
                  const framework::Tensor& last_scale,
                  const framework::Tensor& iter, const int window_size,
                  framework::Tensor* scales_arr, framework::Tensor* out_scale) {
    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());

    T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
    T* out_scale_data = out_scale->mutable_data<T>(gpu_place);

    framework::Tensor need_find_max, out_size;
    int* find_max = need_find_max.mutable_data<int>(gpu_place);
    int* out_size_data = out_size.mutable_data<int>(gpu_place);

    FindRangeAbsMaxAndFillArray<T><<<1, 1, 0, ctx.stream()>>>(
        cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(),
        window_size, scale_arr, out_scale_data, find_max, out_size_data);

    int g_find_max;
    memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max,
                 sizeof(int), 0);
    if (g_find_max) {
      int len;
      memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data,
                   sizeof(int), 0);
      FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len,
                                                          out_scale_data);
    }
  }
};

template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>;

template <typename T>
struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& in_accum,
                  const framework::Tensor& in_state, const T* cur_scale,
                  const float rate, framework::Tensor* out_state,
                  framework::Tensor* out_accum, framework::Tensor* out_scale) {
    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());

    T accum;
    memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(),
                 sizeof(T), 0);
    T state;
    memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(),
                 sizeof(T), 0);
    T scale;
    memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T),
                 0);

    state = rate * state + 1;
    accum = rate * accum + scale;
    scale = accum / state;

    memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &accum, sizeof(T), 0);
    memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &state, sizeof(T), 0);
    memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place),
                 platform::CPUPlace(), &scale, sizeof(T), 0);
  }
};

template struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext,
                                               float>;

template <typename T>
struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
  void operator()(const platform::CUDADeviceContext& ctx,
                  const framework::Tensor& in, const framework::Tensor& scale,
                  const int bin_cnt, framework::Tensor* out) {
    int num = in.numel();
    int block = 1024;
    int grid = (block - 1 + num) / block;

    const T* in_data = in.data<T>();
    const T* scale_data = scale.data<T>();
    T* out_data = out->mutable_data<T>(ctx.GetPlace());

    ClipAndQuantKernel<T><<<grid, block, 0, ctx.stream()>>>(
        in_data, scale_data, bin_cnt, num, out_data);
  }
};

template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>;

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max,
                        ops::FakeQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max,
                        ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max,
                        ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(
    fake_quantize_moving_average_abs_max,
    ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
d749e5f501b08afeb2a6f83a473ce0b212e2db78.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/weighted_sample_op.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

namespace {

__global__ void WeightedSampleKernel(
    const int batch_size,
    const int weights_dim,
    const float* in_weights_data,
    const float* in_val_data,
    float* samples,
    int* out_idx_data,
    float* out_val_data) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    int offset = i * weights_dim;

    float sum = 0.0;
    for (int j = 0; j < weights_dim; j++) {
      sum += in_weights_data[offset + j];
    }
    samples[i] *= sum;

    float cum_sum = 0.0;
    int j = 0;
    for (; j < weights_dim; j++) {
      cum_sum += in_weights_data[offset + j];
      if (cum_sum >= samples[i]) {
        break;
      }
    }
    out_idx_data[i] = min(j, weights_dim - 1);

    if (out_val_data) {
      out_val_data[i] = in_val_data[offset + out_idx_data[i]];
    }
  }
}

} // namespace

template <>
bool WeightedSampleOp<float, CUDAContext>::RunOnDevice() {
  CAFFE_ENFORCE_EQ(
      InputSize(),
      OutputSize(),
      "The number of tensors of the input and the output must be the same.");

  auto& in_weights = Input(0);
  int batch_size = in_weights.dim(0);
  int weights_dim = in_weights.dim(1);

  if (batch_size > 0 && weights_dim > 0) {
    auto* out_idx = Output(0, {batch_size, 1}, at::dtype<int>());
    unif_samples_.Resize(batch_size);

    const float* in_weights_data = in_weights.data<float>();
    const float* in_val_data = nullptr;
    int* out_idx_data = out_idx->template mutable_data<int>();
    float* out_val_data = nullptr;

    if (OutputSize() == 2) {
      auto& in_val = Input(1);
      CAFFE_ENFORCE_EQ(
          in_weights.sizes(),
          in_val.sizes(),
          "The sampling weights tensor and the sampling values tensor must have the same dimensions.");
      in_val_data = in_val.data<float>();

      auto* out_val = Output(1, {batch_size, 1}, at::dtype<float>());
      out_val_data = out_val->template mutable_data<float>();
    }

    float* unif_samples_data = unif_samples_.mutable_data<float>();
    CURAND_ENFORCE(hiprandGenerateUniform(
        context_.curand_generator(), unif_samples_data, batch_size));

    hipLaunchKernelGGL(
        (WeightedSampleKernel),
        dim3(CAFFE_GET_BLOCKS(batch_size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        batch_size,
        weights_dim,
        in_weights_data,
        in_val_data,
        unif_samples_data,
        out_idx_data,
        out_val_data);
  } else {
    /* out_idx = */ Output(0, {0}, at::dtype<int>());
    if (OutputSize() == 2) {
      /* out_val = */ Output(1, {0}, at::dtype<float>());
    }
  }

  return true;
}

REGISTER_CUDA_OPERATOR(WeightedSample, WeightedSampleOp<float, CUDAContext>);
} // namespace caffe2
d749e5f501b08afeb2a6f83a473ce0b212e2db78.cu
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/weighted_sample_op.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

namespace {

__global__ void WeightedSampleKernel(
    const int batch_size,
    const int weights_dim,
    const float* in_weights_data,
    const float* in_val_data,
    float* samples,
    int* out_idx_data,
    float* out_val_data) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    int offset = i * weights_dim;

    float sum = 0.0;
    for (int j = 0; j < weights_dim; j++) {
      sum += in_weights_data[offset + j];
    }
    samples[i] *= sum;

    float cum_sum = 0.0;
    int j = 0;
    for (; j < weights_dim; j++) {
      cum_sum += in_weights_data[offset + j];
      if (cum_sum >= samples[i]) {
        break;
      }
    }
    out_idx_data[i] = min(j, weights_dim - 1);

    if (out_val_data) {
      out_val_data[i] = in_val_data[offset + out_idx_data[i]];
    }
  }
}

} // namespace

template <>
bool WeightedSampleOp<float, CUDAContext>::RunOnDevice() {
  CAFFE_ENFORCE_EQ(
      InputSize(),
      OutputSize(),
      "The number of tensors of the input and the output must be the same.");

  auto& in_weights = Input(0);
  int batch_size = in_weights.dim(0);
  int weights_dim = in_weights.dim(1);

  if (batch_size > 0 && weights_dim > 0) {
    auto* out_idx = Output(0, {batch_size, 1}, at::dtype<int>());
    unif_samples_.Resize(batch_size);

    const float* in_weights_data = in_weights.data<float>();
    const float* in_val_data = nullptr;
    int* out_idx_data = out_idx->template mutable_data<int>();
    float* out_val_data = nullptr;

    if (OutputSize() == 2) {
      auto& in_val = Input(1);
      CAFFE_ENFORCE_EQ(
          in_weights.sizes(),
          in_val.sizes(),
          "The sampling weights tensor and the sampling values tensor must have the same dimensions.");
      in_val_data = in_val.data<float>();

      auto* out_val = Output(1, {batch_size, 1}, at::dtype<float>());
      out_val_data = out_val->template mutable_data<float>();
    }

    float* unif_samples_data = unif_samples_.mutable_data<float>();
    CURAND_ENFORCE(curandGenerateUniform(
        context_.curand_generator(), unif_samples_data, batch_size));

    WeightedSampleKernel<<<
        CAFFE_GET_BLOCKS(batch_size),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        batch_size,
        weights_dim,
        in_weights_data,
        in_val_data,
        unif_samples_data,
        out_idx_data,
        out_val_data);
  } else {
    /* out_idx = */ Output(0, {0}, at::dtype<int>());
    if (OutputSize() == 2) {
      /* out_val = */ Output(1, {0}, at::dtype<float>());
    }
  }

  return true;
}

REGISTER_CUDA_OPERATOR(WeightedSample, WeightedSampleOp<float, CUDAContext>);
} // namespace caffe2
c2cd1b0b037c72e0a4da1f94d7551ff30a361a74.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef ZERODM_KERNEL_H_
#define ZERODM_KERNEL_H_

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include "AstroAccelerate/params.h"

//{{{ zero dm kernel - needs cleaning and optimizing
// WA 21/10/16

__global__ void zero_dm_kernel(unsigned short *d_input, int nchans, int nsamp)
{
	int t = blockIdx.x * blockDim.x + threadIdx.x;

	float sum = 0.0f;

	for(int c = 0; c < nchans; c++) sum+=(float)__ldg(&d_input[t*nchans + c]);
	sum = (sum/(float)nchans);
	for(int c = 0; c < nchans; c++) d_input[t*nchans + c]=(unsigned short)((unsigned char)((float)d_input[t*nchans + c]-sum));
}

//}}}

#endif
c2cd1b0b037c72e0a4da1f94d7551ff30a361a74.cu
#ifndef ZERODM_KERNEL_H_
#define ZERODM_KERNEL_H_

#include <cuda.h>
#include <cuda_runtime.h>

#include "AstroAccelerate/params.h"

//{{{ zero dm kernel - needs cleaning and optimizing
// WA 21/10/16

__global__ void zero_dm_kernel(unsigned short *d_input, int nchans, int nsamp)
{
	int t = blockIdx.x * blockDim.x + threadIdx.x;

	float sum = 0.0f;

	for(int c = 0; c < nchans; c++) sum+=(float)__ldg(&d_input[t*nchans + c]);
	sum = (sum/(float)nchans);
	for(int c = 0; c < nchans; c++) d_input[t*nchans + c]=(unsigned short)((unsigned char)((float)d_input[t*nchans + c]-sum));
}

//}}}

#endif
c6d8cfb9f563f4422c66d328adf02c45a61f9e83.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>

#include "../support/device-support.cu"
#include "../lib/Common.h"

#define cudaChecked(code) do {\
  hipError_t err = code;\
  if (err != hipSuccess) {\
    printf("CUDA error at %s:%d: %s\n", __FILE__, __LINE__,\
        hipGetErrorString(err));\
    abort();\
  }\
} while(0)\

extern "C" __constant__ traceinfo_t globalVar;

extern "C" {
  void __trace_touch(hipStream_t stream);
  void __trace_start(hipStream_t stream, const char *kernel_name);
  void __trace_fill_info(const void *info, hipStream_t stream);
  void __trace_copy_to_symbol(hipStream_t stream, const char* symbol, const void *info);
  void __trace_stop(hipStream_t stream);
}

__global__ void test_kernel(uint8_t* records, uint8_t* allocs, uint8_t* commits,
                            int32_t rounds, int32_t modulo) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid % modulo != 0) return;
  for (int i = 0; i < rounds; ++i) {
    __mem_trace(records, allocs, commits, gid, gid/32, gid/32 % SLOTS_NUM);
  }
}

int main(int argc, char** argv) {
  // check if unified addressing is used, so that cudaHostGetDevicePtr is the
  // identity function.
  hipDeviceProp_t prop;
  cudaChecked(hipGetDeviceProperties(&prop, 0));

  const char* rounds_str = getenv("ROUNDS");
  int32_t rounds = rounds_str ? strtol(rounds_str, NULL, 10) : 1;

  const char* threads_total_str = getenv("THREADS");
  int32_t threads_total = threads_total_str ? strtol(threads_total_str, NULL, 10) : 32;

  const char* modulo_str = getenv("MODULO");
  int32_t modulo = modulo_str ? strtol(modulo_str, NULL, 10) : 2;

  if (!prop.unifiedAddressing) {
    printf("unified addressing not supported, unable to test device access from host\n");
    exit(0);
  }

  int32_t threads = threads_total > 1024 ? 1024 : threads_total;
  int32_t blocks = (threads + 1023) / 1024;
  printf("blockSize.x = %d, blockDim.x = %d (total: %d), rounds: %d\n",
      threads, blocks, threads*blocks, rounds);
  printf("guard modulo: if (gid %% %d != 0) return;\n", modulo);
  printf("expected trace records: %" PRIu64 "\n", (uint64_t)threads*blocks*rounds / modulo);
  printf("---\n");

  setenv("MEMTRACE_PATTERN", "./test-trace-device", 1);
  __trace_touch(NULL);

  printf("starting trace\n");
  __trace_start(NULL, "test");

  traceinfo_t info;
  __trace_fill_info(&info, NULL);
  __trace_copy_to_symbol(NULL, "globalVar", &info);

  uint8_t *allocs = info.allocs;
  uint8_t *commits = info.commits;
  uint8_t *records = info.records;

  hipLaunchKernelGGL((test_kernel), dim3(blocks), dim3(threads), 0, 0, records, allocs, commits, rounds, modulo);
  hipMemcpyToSymbol(globalVar, &info, sizeof(traceinfo_t), hipMemcpyHostToDevice);
  cudaChecked(hipDeviceSynchronize());

  printf("stopping trace\n");
  __trace_stop(NULL);

  cudaChecked(hipStreamSynchronize(NULL));
  cudaChecked(hipDeviceSynchronize());

  return 0;
}
c6d8cfb9f563f4422c66d328adf02c45a61f9e83.cu
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>

#include "../support/device-support.cu"
#include "../lib/Common.h"

#define cudaChecked(code) do {\
  cudaError_t err = code;\
  if (err != cudaSuccess) {\
    printf("CUDA error at %s:%d: %s\n", __FILE__, __LINE__,\
        cudaGetErrorString(err));\
    abort();\
  }\
} while(0)\

extern "C" __constant__ traceinfo_t globalVar;

extern "C" {
  void __trace_touch(cudaStream_t stream);
  void __trace_start(cudaStream_t stream, const char *kernel_name);
  void __trace_fill_info(const void *info, cudaStream_t stream);
  void __trace_copy_to_symbol(cudaStream_t stream, const char* symbol, const void *info);
  void __trace_stop(cudaStream_t stream);
}

__global__ void test_kernel(uint8_t* records, uint8_t* allocs, uint8_t* commits,
                            int32_t rounds, int32_t modulo) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid % modulo != 0) return;
  for (int i = 0; i < rounds; ++i) {
    __mem_trace(records, allocs, commits, gid, gid/32, gid/32 % SLOTS_NUM);
  }
}

int main(int argc, char** argv) {
  // check if unified addressing is used, so that cudaHostGetDevicePtr is the
  // identity function.
  cudaDeviceProp prop;
  cudaChecked(cudaGetDeviceProperties(&prop, 0));

  const char* rounds_str = getenv("ROUNDS");
  int32_t rounds = rounds_str ? strtol(rounds_str, NULL, 10) : 1;

  const char* threads_total_str = getenv("THREADS");
  int32_t threads_total = threads_total_str ? strtol(threads_total_str, NULL, 10) : 32;

  const char* modulo_str = getenv("MODULO");
  int32_t modulo = modulo_str ? strtol(modulo_str, NULL, 10) : 2;

  if (!prop.unifiedAddressing) {
    printf("unified addressing not supported, unable to test device access from host\n");
    exit(0);
  }

  int32_t threads = threads_total > 1024 ? 1024 : threads_total;
  int32_t blocks = (threads + 1023) / 1024;
  printf("blockSize.x = %d, blockDim.x = %d (total: %d), rounds: %d\n",
      threads, blocks, threads*blocks, rounds);
  printf("guard modulo: if (gid %% %d != 0) return;\n", modulo);
  printf("expected trace records: %" PRIu64 "\n", (uint64_t)threads*blocks*rounds / modulo);
  printf("---\n");

  setenv("MEMTRACE_PATTERN", "./test-trace-device", 1);
  __trace_touch(NULL);

  printf("starting trace\n");
  __trace_start(NULL, "test");

  traceinfo_t info;
  __trace_fill_info(&info, NULL);
  __trace_copy_to_symbol(NULL, "globalVar", &info);

  uint8_t *allocs = info.allocs;
  uint8_t *commits = info.commits;
  uint8_t *records = info.records;

  test_kernel<<<blocks, threads>>>(records, allocs, commits, rounds, modulo);
  cudaMemcpyToSymbol(globalVar, &info, sizeof(traceinfo_t), cudaMemcpyHostToDevice);
  cudaChecked(cudaDeviceSynchronize());

  printf("stopping trace\n");
  __trace_stop(NULL);

  cudaChecked(cudaStreamSynchronize(NULL));
  cudaChecked(cudaDeviceSynchronize());

  return 0;
}
1feb6311075c1c90411a564a03b64c7074f9d6f7.hip
// !!! This is a file automatically generated by hipify!!! /** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_pulsed_device.h" #include <memory> namespace RPU { /******************************************************************************************/ /* PulsedRPUDeviceCuda Base class which maintains the basic hard bounds and dw_min up/down and decays etc for the pulsed updates. Note that it is still Abstract. Need to implement the getUpdateKernels in derived. */ template <typename T> PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(CudaContext *c, int x_size, int d_size) : PulsedRPUDeviceCudaBase<T>(c, x_size, d_size){}; template <typename T> void PulsedRPUDeviceCuda<T>::initialize() { dev_4params_ = RPU::make_unique<CudaArray<float>>(this->context_, 4 * this->size_); dev_diffusion_rate_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); dev_reset_bias_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); dev_decay_scale_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); this->context_->synchronize(); }; template <typename T> PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(const PulsedRPUDeviceCuda<T> &other) : PulsedRPUDeviceCudaBase<T>(other) { initialize(); dev_4params_->assign(*other.dev_4params_); dev_diffusion_rate_->assign(*other.dev_diffusion_rate_); dev_reset_bias_->assign(*other.dev_reset_bias_); dev_decay_scale_->assign(*other.dev_decay_scale_); this->context_->synchronize(); }; // template <typename T> // PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(const PulsedRPUDeviceCuda<T>& other){ // PulsedRPUDeviceCuda<T> tmp(other); // swap(*this,tmp); // return *this; // }; // template <typename T> // PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(PulsedRPUDeviceCuda<T>&& other) { // *this = std::move(other); // }; // template <typename T> // PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(PulsedRPUDeviceCuda<T>&& other){ // PulsedRPUDeviceCudaBase<T>::operator=(std::move(other)); // dev_4params_ = std::move(other.dev_4params_); // dev_diffusion_rate_ = std::move(other.dev_diffusion_rate_); // dev_reset_bias_ = std::move(other.dev_reset_bias_); // dev_decay_scale_ = std::move(other.dev_decay_scale_); // return *this; // }; template <typename T> void PulsedRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) { const auto &rpu_device = dynamic_cast<const PulsedRPUDevice<T> &>(rpu_device_in); if (&rpu_device == nullptr) { RPU_FATAL("populateFrom expects PulsedRPUDevice."); } int x_size = rpu_device.getXSize(); int d_size = rpu_device.getDSize(); int size = x_size * d_size; initialize(); PulsedRPUDeviceCudaBase<T>::populateFrom(rpu_device_in); PulsedDPStruc<T> **sup = rpu_device.getDPStruc(); // copy RPU to device variables float *tmp = new float[4 * size]; T *tmp_ds = new T[size]; T *tmp_df = new T[size]; T *tmp_rb = new T[size]; T *ds = rpu_device.getDecayScale()[0]; T *df = rpu_device.getDiffusionRate()[0]; T *rb = rpu_device.getResetBias()[0]; for (int i = 0; i < d_size; ++i) { for (int j = 0; j < x_size; ++j) { 
int k = j * (d_size * 4) + 4 * i; // transposed: col major required by cuBLAS .. linear arangmenet for now tmp[k] = sup[i][j].min_bound; tmp[k + 1] = sup[i][j].scale_down; tmp[k + 2] = sup[i][j].max_bound; tmp[k + 3] = sup[i][j].scale_up; int l_t = j * (d_size) + i; int l = i * (x_size) + j; tmp_ds[l_t] = ds[l]; tmp_df[l_t] = df[l]; tmp_rb[l_t] = rb[l]; } } dev_4params_->assign(tmp); // other parameters dev_decay_scale_->assign(tmp_ds); dev_diffusion_rate_->assign(tmp_df); dev_reset_bias_->assign(tmp_rb); this->context_->synchronize(); delete[] tmp_ds; delete[] tmp_df; delete[] tmp_rb; delete[] tmp; } template <typename T> void PulsedRPUDeviceCuda<T>::applyWeightUpdate(T *weights, T *dw_and_current_weight_out) { RPU::math::elemaddcopysat<T>( this->context_, weights, dw_and_current_weight_out, this->size_, dev_4params_->getDataConst()); } template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, T alpha, bool bias_no_decay) { RPU::math::elemscalealpha<T>( this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_, dev_decay_scale_->getData(), dev_4params_->getData(), alpha); } template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, bool bias_no_decay) { RPU::math::elemscale<T>( this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_, dev_decay_scale_->getData(), dev_4params_->getData()); } template <typename T> void PulsedRPUDeviceCuda<T>::diffuseWeights(T *weights) { if (this->dev_diffusion_nrnd_ == nullptr) { this->initDiffusionRnd(); this->rnd_context_->randNormal( this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize()); } this->rnd_context_->synchronize(); RPU::math::elemasb02<T>( this->context_, weights, this->size_, this->dev_diffusion_nrnd_->getData(), dev_diffusion_rate_->getData(), dev_4params_->getData()); this->rnd_context_->recordWaitEvent(this->context_->getStream()); this->rnd_context_->randNormal( this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize()); } template <typename T> void PulsedRPUDeviceCuda<T>::clipWeights(T *weights, T clip) { RPU::math::elemsat<T>(this->context_, weights, this->size_, dev_4params_->getData()); if (clip >= 0) { RPU::math::aclip<T>(this->context_, weights, this->size_, clip); } } template <typename T> void PulsedRPUDeviceCuda<T>::initResetRnd() { if (this->rnd_context_ == nullptr) { this->initRndContext(); } dev_reset_nrnd_ = std::unique_ptr<CudaArray<float>>( new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32)); dev_reset_flag_ = std::unique_ptr<CudaArray<float>>( new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32)); dev_reset_flag_->setConst(0); this->rnd_context_->synchronize(); } template <typename T> void PulsedRPUDeviceCuda<T>::resetCols(T *weights, int start_col, int n_cols, T reset_prob) { // col-major in CUDA. 
if (dev_reset_nrnd_ == nullptr) { initResetRnd(); } int n = n_cols * this->d_size_; int offset = start_col * this->d_size_; this->rnd_context_->randNormal( dev_reset_nrnd_->getData(), n_cols * this->d_size_, 0.0, getPar().reset_std); if (reset_prob < 1) { this->rnd_context_->randUniform(dev_reset_flag_->getData(), n_cols * this->d_size_); } this->context_->recordWaitEvent(this->rnd_context_->getStream()); if (n >= this->size_) { // reset whole matrix RPU::math::elemresetsat<T>( this->context_, weights, this->size_, dev_reset_bias_->getDataConst(), dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData()); } else if (offset + n <= this->size_) { // one pass enough RPU::math::elemresetsat<T>( this->context_, weights + offset, n, dev_reset_bias_->getDataConst() + offset, dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData() + 4 * offset); } else { // two passes int m = this->size_ - offset; RPU::math::elemresetsat<T>( this->context_, weights + offset, m, dev_reset_bias_->getDataConst() + offset, dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData() + 4 * offset); RPU::math::elemresetsat<T>( this->context_, weights, n - m, dev_reset_bias_->getDataConst(), dev_reset_nrnd_->getDataConst() + m, dev_reset_flag_->getDataConst() + m, reset_prob, dev_4params_->getData()); } } template <typename T> void PulsedRPUDeviceCuda<T>::runUpdateKernel( pwukp_t<T> kpars, CudaContext *c, T *dev_weights, int m_batch, const BitLineMaker<T> *blm, const PulsedUpdateMetaParameter<T> &up, hiprandState_t *dev_states, int one_sided, uint32_t *x_counts_chunk, uint32_t *d_counts_chunk) { kpars->run( c->getStream(), dev_weights, m_batch, blm, this, up, dev_states, one_sided, x_counts_chunk, d_counts_chunk); } template class PulsedRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class PulsedRPUDeviceCuda<double>; #endif } // namespace RPU
1feb6311075c1c90411a564a03b64c7074f9d6f7.cu
/** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_pulsed_device.h" #include <memory> namespace RPU { /******************************************************************************************/ /* PulsedRPUDeviceCuda Base class which maintains the basic hard bounds and dw_min up/down and decays etc for the pulsed updates. Note that it is still Abstract. Need to implement the getUpdateKernels in derived. */ template <typename T> PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(CudaContext *c, int x_size, int d_size) : PulsedRPUDeviceCudaBase<T>(c, x_size, d_size){}; template <typename T> void PulsedRPUDeviceCuda<T>::initialize() { dev_4params_ = RPU::make_unique<CudaArray<float>>(this->context_, 4 * this->size_); dev_diffusion_rate_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); dev_reset_bias_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); dev_decay_scale_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); this->context_->synchronize(); }; template <typename T> PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(const PulsedRPUDeviceCuda<T> &other) : PulsedRPUDeviceCudaBase<T>(other) { initialize(); dev_4params_->assign(*other.dev_4params_); dev_diffusion_rate_->assign(*other.dev_diffusion_rate_); dev_reset_bias_->assign(*other.dev_reset_bias_); dev_decay_scale_->assign(*other.dev_decay_scale_); this->context_->synchronize(); }; // template <typename T> // PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(const PulsedRPUDeviceCuda<T>& other){ // PulsedRPUDeviceCuda<T> tmp(other); // swap(*this,tmp); // return *this; // }; // template <typename T> // PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(PulsedRPUDeviceCuda<T>&& other) { // *this = std::move(other); // }; // template <typename T> // PulsedRPUDeviceCuda<T>& PulsedRPUDeviceCuda<T>::operator=(PulsedRPUDeviceCuda<T>&& other){ // PulsedRPUDeviceCudaBase<T>::operator=(std::move(other)); // dev_4params_ = std::move(other.dev_4params_); // dev_diffusion_rate_ = std::move(other.dev_diffusion_rate_); // dev_reset_bias_ = std::move(other.dev_reset_bias_); // dev_decay_scale_ = std::move(other.dev_decay_scale_); // return *this; // }; template <typename T> void PulsedRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) { const auto &rpu_device = dynamic_cast<const PulsedRPUDevice<T> &>(rpu_device_in); if (&rpu_device == nullptr) { RPU_FATAL("populateFrom expects PulsedRPUDevice."); } int x_size = rpu_device.getXSize(); int d_size = rpu_device.getDSize(); int size = x_size * d_size; initialize(); PulsedRPUDeviceCudaBase<T>::populateFrom(rpu_device_in); PulsedDPStruc<T> **sup = rpu_device.getDPStruc(); // copy RPU to device variables float *tmp = new float[4 * size]; T *tmp_ds = new T[size]; T *tmp_df = new T[size]; T *tmp_rb = new T[size]; T *ds = rpu_device.getDecayScale()[0]; T *df = rpu_device.getDiffusionRate()[0]; T *rb = rpu_device.getResetBias()[0]; for (int i = 0; i < d_size; ++i) { for (int j = 0; j < x_size; ++j) { int k = j * (d_size * 4) + 4 * i; // transposed: col major 
required by cuBLAS .. linear arangmenet for now tmp[k] = sup[i][j].min_bound; tmp[k + 1] = sup[i][j].scale_down; tmp[k + 2] = sup[i][j].max_bound; tmp[k + 3] = sup[i][j].scale_up; int l_t = j * (d_size) + i; int l = i * (x_size) + j; tmp_ds[l_t] = ds[l]; tmp_df[l_t] = df[l]; tmp_rb[l_t] = rb[l]; } } dev_4params_->assign(tmp); // other parameters dev_decay_scale_->assign(tmp_ds); dev_diffusion_rate_->assign(tmp_df); dev_reset_bias_->assign(tmp_rb); this->context_->synchronize(); delete[] tmp_ds; delete[] tmp_df; delete[] tmp_rb; delete[] tmp; } template <typename T> void PulsedRPUDeviceCuda<T>::applyWeightUpdate(T *weights, T *dw_and_current_weight_out) { RPU::math::elemaddcopysat<T>( this->context_, weights, dw_and_current_weight_out, this->size_, dev_4params_->getDataConst()); } template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, T alpha, bool bias_no_decay) { RPU::math::elemscalealpha<T>( this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_, dev_decay_scale_->getData(), dev_4params_->getData(), alpha); } template <typename T> void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, bool bias_no_decay) { RPU::math::elemscale<T>( this->context_, weights, bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_, dev_decay_scale_->getData(), dev_4params_->getData()); } template <typename T> void PulsedRPUDeviceCuda<T>::diffuseWeights(T *weights) { if (this->dev_diffusion_nrnd_ == nullptr) { this->initDiffusionRnd(); this->rnd_context_->randNormal( this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize()); } this->rnd_context_->synchronize(); RPU::math::elemasb02<T>( this->context_, weights, this->size_, this->dev_diffusion_nrnd_->getData(), dev_diffusion_rate_->getData(), dev_4params_->getData()); this->rnd_context_->recordWaitEvent(this->context_->getStream()); this->rnd_context_->randNormal( this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize()); } template <typename T> void PulsedRPUDeviceCuda<T>::clipWeights(T *weights, T clip) { RPU::math::elemsat<T>(this->context_, weights, this->size_, dev_4params_->getData()); if (clip >= 0) { RPU::math::aclip<T>(this->context_, weights, this->size_, clip); } } template <typename T> void PulsedRPUDeviceCuda<T>::initResetRnd() { if (this->rnd_context_ == nullptr) { this->initRndContext(); } dev_reset_nrnd_ = std::unique_ptr<CudaArray<float>>( new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32)); dev_reset_flag_ = std::unique_ptr<CudaArray<float>>( new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32)); dev_reset_flag_->setConst(0); this->rnd_context_->synchronize(); } template <typename T> void PulsedRPUDeviceCuda<T>::resetCols(T *weights, int start_col, int n_cols, T reset_prob) { // col-major in CUDA. 
if (dev_reset_nrnd_ == nullptr) { initResetRnd(); } int n = n_cols * this->d_size_; int offset = start_col * this->d_size_; this->rnd_context_->randNormal( dev_reset_nrnd_->getData(), n_cols * this->d_size_, 0.0, getPar().reset_std); if (reset_prob < 1) { this->rnd_context_->randUniform(dev_reset_flag_->getData(), n_cols * this->d_size_); } this->context_->recordWaitEvent(this->rnd_context_->getStream()); if (n >= this->size_) { // reset whole matrix RPU::math::elemresetsat<T>( this->context_, weights, this->size_, dev_reset_bias_->getDataConst(), dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData()); } else if (offset + n <= this->size_) { // one pass enough RPU::math::elemresetsat<T>( this->context_, weights + offset, n, dev_reset_bias_->getDataConst() + offset, dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData() + 4 * offset); } else { // two passes int m = this->size_ - offset; RPU::math::elemresetsat<T>( this->context_, weights + offset, m, dev_reset_bias_->getDataConst() + offset, dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob, dev_4params_->getData() + 4 * offset); RPU::math::elemresetsat<T>( this->context_, weights, n - m, dev_reset_bias_->getDataConst(), dev_reset_nrnd_->getDataConst() + m, dev_reset_flag_->getDataConst() + m, reset_prob, dev_4params_->getData()); } } template <typename T> void PulsedRPUDeviceCuda<T>::runUpdateKernel( pwukp_t<T> kpars, CudaContext *c, T *dev_weights, int m_batch, const BitLineMaker<T> *blm, const PulsedUpdateMetaParameter<T> &up, curandState_t *dev_states, int one_sided, uint32_t *x_counts_chunk, uint32_t *d_counts_chunk) { kpars->run( c->getStream(), dev_weights, m_batch, blm, this, up, dev_states, one_sided, x_counts_chunk, d_counts_chunk); } template class PulsedRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class PulsedRPUDeviceCuda<double>; #endif } // namespace RPU
c3136a126f0f62ff9fffb61b43117d4dc64b82cc.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2013 Bryan Catanzaro Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Modifications in 2015,2016 by Levi Barnes */ #include "kmeans.h" #include "util.h" #include <thrust/reduce.h> namespace kmeans { int kmeans(int iterations, int n, int d, int k, thrust::device_vector<double>** data, thrust::device_vector<int>** labels, thrust::device_vector<double>** centroids, thrust::device_vector<double>** distances, int n_gpu, bool init_from_labels, double threshold) { thrust::device_vector<double> *data_dots[16]; thrust::device_vector<double> *centroid_dots[16]; thrust::device_vector<double> *pairwise_distances[16]; thrust::device_vector<int> *labels_copy[16]; thrust::device_vector<int> *range[16]; thrust::device_vector<int> *indices[16]; thrust::device_vector<int> *counts[16]; thrust::host_vector<double> h_centroids( k * d ); thrust::host_vector<double> h_centroids_tmp( k * d ); int h_changes[16], *d_changes[16]; double h_distance_sum[16], *d_distance_sum[16]; for (int q = 0; q < n_gpu; q++) { hipSetDevice(q); hipMalloc(&d_changes[q], sizeof(int)); hipMalloc(&d_distance_sum[q], sizeof(double)); detail::labels_init(); data_dots[q] = new thrust::device_vector <double>(n/n_gpu); centroid_dots[q] = new thrust::device_vector<double>(n/n_gpu); pairwise_distances[q] = new thrust::device_vector<double>(n/n_gpu * k); labels_copy[q] = new thrust::device_vector<int>(n/n_gpu * d); range[q] = new thrust::device_vector<int>(n/n_gpu); counts[q] = new thrust::device_vector<int>(k); indices[q] = new thrust::device_vector<int>(n/n_gpu); //Create and save "range" for initializing labels thrust::copy(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n/n_gpu), (*range[q]).begin()); detail::make_self_dots(n/n_gpu, d, *data[q], *data_dots[q]); if (init_from_labels) { detail::find_centroids(n/n_gpu, d, k, *data[q], *labels[q], *centroids[q], *range[q], *indices[q], *counts[q]); } } double prior_distance_sum = 0; int i=0; for(; i < iterations; i++) { //Average the centroids from each device if (n_gpu > 1) { for (int p = 0; p < k * d; p++) h_centroids[p] = 0.0; for (int q = 0; q < n_gpu; q++) { hipSetDevice(q); detail::memcpy(h_centroids_tmp, *centroids[q]); detail::streamsync(q); for (int p = 0; p < k * d; p++) h_centroids[p] += h_centroids_tmp[p]; } for (int p = 0; p < k * d; p++) h_centroids[p] /= n_gpu; //Copy the averaged centroids to each device for (int q = 0; q < n_gpu; q++) { hipSetDevice(q); detail::memcpy(*centroids[q],h_centroids); } } for (int q = 0; q < n_gpu; q++) { //TODO compute total distance hipSetDevice(q); detail::calculate_distances(n/n_gpu, d, k, *data[q], *centroids[q], *data_dots[q], *centroid_dots[q], *pairwise_distances[q]); detail::relabel(n/n_gpu, k, *pairwise_distances[q], *labels[q], *distances[q], d_changes[q]); //TODO remove one memcpy detail::memcpy(*labels_copy[q], *labels[q]); detail::find_centroids(n/n_gpu, d, k, *data[q], *labels[q], *centroids[q], *range[q], *indices[q], *counts[q]); detail::memcpy(*labels[q], *labels_copy[q]); 
//double d_distance_sum[q] = thrust::reduce(distances[q].begin(), distances[q].end()) mycub::sum_reduce(*distances[q], d_distance_sum[q]); } #if __VERBOSE double distance_sum = 0.0; for (int q = 0; q < n_gpu; q++) { hipMemcpyAsync(h_changes+q, d_changes[q], sizeof(int), hipMemcpyDeviceToHost, cuda_stream[q]); hipMemcpyAsync(h_distance_sum+q, d_distance_sum[q], sizeof(double), hipMemcpyDeviceToHost, cuda_stream[q]); detail::streamsync(q); std::cout << "Device " << q << ": Iteration " << i << " produced " << h_changes[q] << " changes and the total_distance is " << h_distance_sum[q] << std::endl; distance_sum += h_distance_sum[q]; } if (i > 0) { double delta = distance_sum / prior_distance_sum; if (delta > 1 - threshold) { std::cout << "Threshold triggered. Terminating iterations early." << std::endl; return i + 1; } } prior_distance_sum = distance_sum; #endif } for (int q = 0; q < n_gpu; q++) { hipSetDevice(q); hipFree(d_changes[q]); detail::labels_close(); delete(pairwise_distances[q]); delete(data_dots[q]); delete(centroid_dots[q]); } return i; } }
c3136a126f0f62ff9fffb61b43117d4dc64b82cc.cu
/* Copyright 2013 Bryan Catanzaro Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Modifications in 2015,2016 by Levi Barnes */ #include "kmeans.h" #include "util.h" #include <thrust/reduce.h> namespace kmeans { int kmeans(int iterations, int n, int d, int k, thrust::device_vector<double>** data, thrust::device_vector<int>** labels, thrust::device_vector<double>** centroids, thrust::device_vector<double>** distances, int n_gpu, bool init_from_labels, double threshold) { thrust::device_vector<double> *data_dots[16]; thrust::device_vector<double> *centroid_dots[16]; thrust::device_vector<double> *pairwise_distances[16]; thrust::device_vector<int> *labels_copy[16]; thrust::device_vector<int> *range[16]; thrust::device_vector<int> *indices[16]; thrust::device_vector<int> *counts[16]; thrust::host_vector<double> h_centroids( k * d ); thrust::host_vector<double> h_centroids_tmp( k * d ); int h_changes[16], *d_changes[16]; double h_distance_sum[16], *d_distance_sum[16]; for (int q = 0; q < n_gpu; q++) { cudaSetDevice(q); cudaMalloc(&d_changes[q], sizeof(int)); cudaMalloc(&d_distance_sum[q], sizeof(double)); detail::labels_init(); data_dots[q] = new thrust::device_vector <double>(n/n_gpu); centroid_dots[q] = new thrust::device_vector<double>(n/n_gpu); pairwise_distances[q] = new thrust::device_vector<double>(n/n_gpu * k); labels_copy[q] = new thrust::device_vector<int>(n/n_gpu * d); range[q] = new thrust::device_vector<int>(n/n_gpu); counts[q] = new thrust::device_vector<int>(k); indices[q] = new thrust::device_vector<int>(n/n_gpu); //Create and save "range" for initializing labels thrust::copy(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n/n_gpu), (*range[q]).begin()); detail::make_self_dots(n/n_gpu, d, *data[q], *data_dots[q]); if (init_from_labels) { detail::find_centroids(n/n_gpu, d, k, *data[q], *labels[q], *centroids[q], *range[q], *indices[q], *counts[q]); } } double prior_distance_sum = 0; int i=0; for(; i < iterations; i++) { //Average the centroids from each device if (n_gpu > 1) { for (int p = 0; p < k * d; p++) h_centroids[p] = 0.0; for (int q = 0; q < n_gpu; q++) { cudaSetDevice(q); detail::memcpy(h_centroids_tmp, *centroids[q]); detail::streamsync(q); for (int p = 0; p < k * d; p++) h_centroids[p] += h_centroids_tmp[p]; } for (int p = 0; p < k * d; p++) h_centroids[p] /= n_gpu; //Copy the averaged centroids to each device for (int q = 0; q < n_gpu; q++) { cudaSetDevice(q); detail::memcpy(*centroids[q],h_centroids); } } for (int q = 0; q < n_gpu; q++) { //TODO compute total distance cudaSetDevice(q); detail::calculate_distances(n/n_gpu, d, k, *data[q], *centroids[q], *data_dots[q], *centroid_dots[q], *pairwise_distances[q]); detail::relabel(n/n_gpu, k, *pairwise_distances[q], *labels[q], *distances[q], d_changes[q]); //TODO remove one memcpy detail::memcpy(*labels_copy[q], *labels[q]); detail::find_centroids(n/n_gpu, d, k, *data[q], *labels[q], *centroids[q], *range[q], *indices[q], *counts[q]); detail::memcpy(*labels[q], *labels_copy[q]); //double d_distance_sum[q] = 
thrust::reduce(distances[q].begin(), distances[q].end()) mycub::sum_reduce(*distances[q], d_distance_sum[q]); } #if __VERBOSE double distance_sum = 0.0; for (int q = 0; q < n_gpu; q++) { cudaMemcpyAsync(h_changes+q, d_changes[q], sizeof(int), cudaMemcpyDeviceToHost, cuda_stream[q]); cudaMemcpyAsync(h_distance_sum+q, d_distance_sum[q], sizeof(double), cudaMemcpyDeviceToHost, cuda_stream[q]); detail::streamsync(q); std::cout << "Device " << q << ": Iteration " << i << " produced " << h_changes[q] << " changes and the total_distance is " << h_distance_sum[q] << std::endl; distance_sum += h_distance_sum[q]; } if (i > 0) { double delta = distance_sum / prior_distance_sum; if (delta > 1 - threshold) { std::cout << "Threshold triggered. Terminating iterations early." << std::endl; return i + 1; } } prior_distance_sum = distance_sum; #endif } for (int q = 0; q < n_gpu; q++) { cudaSetDevice(q); cudaFree(d_changes[q]); detail::labels_close(); delete(pairwise_distances[q]); delete(data_dots[q]); delete(centroid_dots[q]); } return i; } }
f90f68505951319d9f7ea2134345bd96092157eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is part of Jacobi Test Code (JTC) , a hybrid CUDA-OpenMP-MPI benchmark for Jacobi solver applied to a 3D Laplace equation. This file contains CUDA kernels and auxiliary functions Contributions from Mike Giles, Saif Mula and Mark Mawson Lucian Anton March 2014. Copyright (c) 2014, Science & Technology Facilities Council, UK All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. 
*/ #include<stdio.h> #include "cutil_inline.h" // thread blocks and grid parameters static int BlockX, BlockY, BlockZ, GridX, GridY, GridZ; /** * this notifies the compiler that the definitions are present in external file */ extern "C" { #include "jacobi_c.h" #include "gpu_laplace3d_wrapper.h" } __global__ void kernel_laplace3d_baseline(int NX, int NY, int NZ, const Real* __restrict__ d_u1, Real* __restrict__ d_u2) { int i, j, k, indg, IOFF, JOFF, KOFF, interior, active; Real u2, sixth=1.0/6.0; // // define global indices and array offsets // i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = i + j*NX; IOFF = 1; JOFF = NX; KOFF = NX*NY; interior = i > 0 && i< NX-1 && j> 0 && j<NY-1; active = i >= 0 && i<= NX-1 && j>= 0 && j <= NY-1; if ( active){ d_u2[indg] = d_u1[indg]; indg += KOFF; for (k=1; k<NZ-1; k++) { if (interior) { u2 = ( d_u1[indg-IOFF] + d_u1[indg+IOFF] + d_u1[indg-JOFF] + d_u1[indg+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; } else { u2 = d_u1[indg]; // Dirichlet b.c.'s } // the active flags selects only boundary points d_u2[indg] = u2; indg += KOFF; } d_u2[indg] = d_u1[indg]; } } // // Notes:one thread per node in the 2D block; // after initialisation it marches in the k-direction // extern Real *d_u1, *d_u2; __global__ void kernel_laplace3d_MarkMawson(int Nx, int Ny, int Nz, Real *d_u1, Real *d_u2) { //int i, j, k, bz, ks, ke, indg, active, IOFF, JOFF, KOFF; Real sixth=1.0/6.0; //Thread Indices int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int z = blockIdx.z*blockDim.z+threadIdx.z; if(x<(Nx)&&y<(Ny)&&z<(Nz)){ if(x!=0&&x!=(Nx-1)&&y!=0&&y!=(Ny-1)&&z!=0&&z!=(Nz-1)){ d_u2[z*Ny*Nx+y*Nx+x]= sixth*(d_u1[(z-1)*Ny*Nx+(y )*Nx+(x )] +d_u1[(z+1)*Ny*Nx+(y )*Nx+(x )] +d_u1[(z )*Ny*Nx+(y-1)*Nx+(x )] +d_u1[(z )*Ny*Nx+(y+1)*Nx+(x )] +d_u1[(z )*Ny*Nx+(y )*Nx+(x-1)] +d_u1[(z )*Ny*Nx+(y )*Nx+(x+1)]); }else{ d_u2[z*Ny*Nx+y*Nx+x]=d_u1[z*Ny*Nx+y*Nx+x]; } } } __global__ void kernel_laplace3d_shm(int NX, int NY, int NZ, Real *d_u1, Real *d_u2) { extern __shared__ Real plane[]; int indg, active, halo, indp, IOFF, JOFF, KOFF; Real u2, sixth=1.0/6.0; // // define global indices and array offsets // int i = blockIdx.x*(blockDim.x-2)+threadIdx.x-1; int j = blockIdx.y*(blockDim.y-2)+threadIdx.y-1; int k = blockIdx.z*blockDim.z+threadIdx.z; indg = i + j*NX + k*NX*NY; indp = threadIdx.x + blockDim.x * threadIdx.y; IOFF = 1; JOFF = blockDim.x;//for plane layout KOFF = NX*NY; active = i>=0 && i<=NX-1 && j>=0 && j<=NY-1; halo = threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1; // populate plane with first layer if(active) plane[indp] = d_u1[indg]; __syncthreads(); if (active ) { if (i==0 || i==NX-1 || j==0 || j==NY-1 || k==0 || k==NZ-1) { u2 = d_u1[indg]; // Dirichlet b.c.'s } else { if (!halo) u2 = ( plane[indp-IOFF] + plane[indp+IOFF] + plane[indp-JOFF] + plane[indp+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; /* u2 = ( d_u1[indg-IOFF] + d_u1[indg+IOFF] + d_u1[indg-JOFF] + d_u1[indg+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; */ } if (!halo) d_u2[indg] = u2; } } //This kernel can be used for a quick extimate of the bandwidth __global__ void kernel_BandWidth(int NX, int NY, int NZ, Real *d_u1, Real *d_u2) { Real sixth=1.0/6.0; // // define global indices and array offsets // int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; // WARNING: no checks for the interior, grid 
sizes need to multiple of blocks int indg = i + j * NY + k * NX * NY; d_u2[indg] = d_u1[indg] * sixth; } /** * this function is used to just check if an CUDA enabled GPU device is present in * the system and also to check it's working status. */ extern "C" void CudaDeviceInit(){ int cudaDevCnt; hipGetDeviceCount(&cudaDevCnt); if (cudaDevCnt==0){ printf("No CUDA device found, exiting ...\n"); exit(-1); } else{ printf("Number of cuda devices found %d\n",cudaDevCnt); } hipFree(0); #ifdef __cplusplus hipDeviceProp_t devProp; #else struct hipDeviceProp_t devProp; #endif int dev; hipGetDevice(&dev); hipGetDeviceProperties(&devProp,dev); printf("Using CUDA device %d: %s\n\n",dev,devProp.name); //set cache config to l1 cache // hipError_t error = hipDeviceSetCacheConfig(hipFuncCachePreferShared); // printf("set Cacheconfig error %d\n",error); } ///////////////////////////////////////////////////////////////// /** * this function copies the updated GPU array to GPU */ extern "C" void setInitialData(float* dev,float* host,int NX,int NY,int NZ,float* memoryTimer,int* memoryCtr){ printf("Setting up initial Data ...\n"); hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); //start the timer hipEventRecord(start,0); hipMemcpy(dev,host, sizeof(float)*NX*NY*NZ, hipMemcpyHostToDevice); hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime,start,stop); *memoryTimer += elapsedTime; *memoryCtr += 1; hipEventDestroy(start); hipEventDestroy(stop); } //////////////////////////////////////////////////////////////// /** * this function invokes the Laplace3d GPU function which * executes the functionality on GPU */ extern "C" void laplace3d_GPU(const int alg_key, Real* uOld, int NX,int NY,int NZ,const int* gridparams, int iter_block, float *compTime, float *commTime) { float taux; Real *aux; int iter; size_t shmsize; int threadperblock = 4;//for shared memory blocksize which is currently static //event timer hipEvent_t compStart, compStop, commStart, commStop; cudaSafeCall(hipEventCreate(&commStart)); cudaSafeCall(hipEventCreate(&commStop)); cudaSafeCall(hipEventCreate(&compStart)); cudaSafeCall(hipEventCreate(&compStop)); //start the timer *commTime = 0.0; hipEventRecord(commStart,0); cudaSafeCall(hipMemcpy(d_u1, uOld, sizeof(Real)*NX*NY*NZ, hipMemcpyHostToDevice)); hipEventRecord(commStop,0); hipEventSynchronize(commStop); hipEventElapsedTime(&taux, commStart, commStop); *commTime += taux; dim3 dimGrid(GridX,GridY,GridZ) ; dim3 dimBlock(BlockX, BlockY,BlockZ) ; hipEventRecord(compStart,0); switch(alg_key) { case(ALG_CUDA_2D_BLK): for (iter = 0; iter < iter_block; ++iter){ hipLaunchKernelGGL(( kernel_laplace3d_baseline), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2); cudaSafeCall(hipPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_3D_BLK): for (iter = 0; iter < iter_block; ++iter){ hipLaunchKernelGGL(( kernel_laplace3d_MarkMawson), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2); cudaSafeCall(hipPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_SHM): shmsize=BlockX*BlockY*sizeof(Real); for (iter = 0; iter < iter_block; ++iter){ hipLaunchKernelGGL(( kernel_laplace3d_shm), dim3(dimGrid), dim3(dimBlock), shmsize, 0, NX, NY, NZ, d_u1, d_u2); cudaSafeCall(hipPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_BANDWIDTH): for (iter = 0; iter < iter_block; ++iter){ hipLaunchKernelGGL(( kernel_BandWidth), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, 
d_u2); cudaSafeCall(hipPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; } hipEventRecord(compStop,0); hipEventSynchronize(compStop); hipEventElapsedTime(compTime,compStart,compStop); hipEventRecord(commStart,0); // Becase of the above swap d_u1 points to the last iteration data cudaSafeCall(hipMemcpy(uOld, d_u1, sizeof(Real)*NX*NY*NZ, hipMemcpyDeviceToHost)); hipEventRecord(commStop, 0); hipEventSynchronize(commStop); hipEventElapsedTime(&taux, commStart, commStop); *commTime += taux; cudaSafeCall(hipEventDestroy(commStart)); cudaSafeCall(hipEventDestroy(commStop)); cudaSafeCall(hipEventDestroy(compStart)); cudaSafeCall(hipEventDestroy(compStop)); } /** * --function: getUpdatedArray * this function downloads gpu array from GPU and populates the data * from GPU array to CPU array */ extern "C" void getUpdatedArray(float* host,float* dev,int NX,int NY,int NZ,float* memoryTimer,int* memoryCtr) { hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); //start the timer hipEventRecord(start,0); hipMemcpy(host, dev, sizeof(float)*NX*NY*NZ, hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime,start,stop); *memoryTimer += elapsedTime; *memoryCtr += 1; hipEventDestroy(start); hipEventDestroy(stop); } extern "C" void calcGpuDims(int alg_key, int blockXsize, int blockYsize, int blockZsize,int NX,int NY, int NZ, int* gridsize) { // set threads block sizes and grid sizes. // used 2 dimensions // 0, 1 -> grid x, y // 2,3 -> block x, y switch (alg_key) { case(ALG_CUDA_2D_BLK): GridX = 1 + (NX-1)/blockXsize; GridY = (1 + (NY-1)/blockYsize); //* (1 + (NZ-1) / blockZsize); GridZ = 1; BlockX = blockXsize; BlockY = blockYsize; BlockZ = 1; break; case (ALG_CUDA_SHM) : GridX = 1 + (NX-1)/blockXsize; GridY = 1 + (NY-1)/blockYsize; GridZ = NZ; BlockX = blockXsize + 2; // halo BlockY = blockYsize + 2; BlockZ = 1; break; case(ALG_CUDA_BANDWIDTH): case(ALG_CUDA_3D_BLK): GridX = 1 + (NX-1)/blockXsize; GridY = 1 + (NY-1)/blockYsize; //* (1 + (NZ-1) / blockZsize); GridZ = NZ; BlockX = blockXsize; BlockY = blockYsize; BlockZ = 1; break; default: fprintf(stderr,"unkwon gpu kernel in calcGpuDims, quitting ...!"); exit (1); } // Needs to test if the blocks and grid sizes are meaningful }
f90f68505951319d9f7ea2134345bd96092157eb.cu
/* This is part of Jacobi Test Code (JTC) , a hybrid CUDA-OpenMP-MPI benchmark for Jacobi solver applied to a 3D Laplace equation. This file contains CUDA kernels and auxiliary functions Contributions from Mike Giles, Saif Mula and Mark Mawson Lucian Anton March 2014. Copyright (c) 2014, Science & Technology Facilities Council, UK All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. 
*/ #include<stdio.h> #include "cutil_inline.h" // thread blocks and grid parameters static int BlockX, BlockY, BlockZ, GridX, GridY, GridZ; /** * this notifies the compiler that the definitions are present in external file */ extern "C" { #include "jacobi_c.h" #include "gpu_laplace3d_wrapper.h" } __global__ void kernel_laplace3d_baseline(int NX, int NY, int NZ, const Real* __restrict__ d_u1, Real* __restrict__ d_u2) { int i, j, k, indg, IOFF, JOFF, KOFF, interior, active; Real u2, sixth=1.0/6.0; // // define global indices and array offsets // i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = i + j*NX; IOFF = 1; JOFF = NX; KOFF = NX*NY; interior = i > 0 && i< NX-1 && j> 0 && j<NY-1; active = i >= 0 && i<= NX-1 && j>= 0 && j <= NY-1; if ( active){ d_u2[indg] = d_u1[indg]; indg += KOFF; for (k=1; k<NZ-1; k++) { if (interior) { u2 = ( d_u1[indg-IOFF] + d_u1[indg+IOFF] + d_u1[indg-JOFF] + d_u1[indg+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; } else { u2 = d_u1[indg]; // Dirichlet b.c.'s } // the active flags selects only boundary points d_u2[indg] = u2; indg += KOFF; } d_u2[indg] = d_u1[indg]; } } // // Notes:one thread per node in the 2D block; // after initialisation it marches in the k-direction // extern Real *d_u1, *d_u2; __global__ void kernel_laplace3d_MarkMawson(int Nx, int Ny, int Nz, Real *d_u1, Real *d_u2) { //int i, j, k, bz, ks, ke, indg, active, IOFF, JOFF, KOFF; Real sixth=1.0/6.0; //Thread Indices int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int z = blockIdx.z*blockDim.z+threadIdx.z; if(x<(Nx)&&y<(Ny)&&z<(Nz)){ if(x!=0&&x!=(Nx-1)&&y!=0&&y!=(Ny-1)&&z!=0&&z!=(Nz-1)){ d_u2[z*Ny*Nx+y*Nx+x]= sixth*(d_u1[(z-1)*Ny*Nx+(y )*Nx+(x )] +d_u1[(z+1)*Ny*Nx+(y )*Nx+(x )] +d_u1[(z )*Ny*Nx+(y-1)*Nx+(x )] +d_u1[(z )*Ny*Nx+(y+1)*Nx+(x )] +d_u1[(z )*Ny*Nx+(y )*Nx+(x-1)] +d_u1[(z )*Ny*Nx+(y )*Nx+(x+1)]); }else{ d_u2[z*Ny*Nx+y*Nx+x]=d_u1[z*Ny*Nx+y*Nx+x]; } } } __global__ void kernel_laplace3d_shm(int NX, int NY, int NZ, Real *d_u1, Real *d_u2) { extern __shared__ Real plane[]; int indg, active, halo, indp, IOFF, JOFF, KOFF; Real u2, sixth=1.0/6.0; // // define global indices and array offsets // int i = blockIdx.x*(blockDim.x-2)+threadIdx.x-1; int j = blockIdx.y*(blockDim.y-2)+threadIdx.y-1; int k = blockIdx.z*blockDim.z+threadIdx.z; indg = i + j*NX + k*NX*NY; indp = threadIdx.x + blockDim.x * threadIdx.y; IOFF = 1; JOFF = blockDim.x;//for plane layout KOFF = NX*NY; active = i>=0 && i<=NX-1 && j>=0 && j<=NY-1; halo = threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1; // populate plane with first layer if(active) plane[indp] = d_u1[indg]; __syncthreads(); if (active ) { if (i==0 || i==NX-1 || j==0 || j==NY-1 || k==0 || k==NZ-1) { u2 = d_u1[indg]; // Dirichlet b.c.'s } else { if (!halo) u2 = ( plane[indp-IOFF] + plane[indp+IOFF] + plane[indp-JOFF] + plane[indp+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; /* u2 = ( d_u1[indg-IOFF] + d_u1[indg+IOFF] + d_u1[indg-JOFF] + d_u1[indg+JOFF] + d_u1[indg-KOFF] + d_u1[indg+KOFF] ) * sixth; */ } if (!halo) d_u2[indg] = u2; } } //This kernel can be used for a quick extimate of the bandwidth __global__ void kernel_BandWidth(int NX, int NY, int NZ, Real *d_u1, Real *d_u2) { Real sixth=1.0/6.0; // // define global indices and array offsets // int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int k = blockIdx.z*blockDim.z + threadIdx.z; // WARNING: no checks for the interior, grid 
sizes need to multiple of blocks int indg = i + j * NY + k * NX * NY; d_u2[indg] = d_u1[indg] * sixth; } /** * this function is used to just check if an CUDA enabled GPU device is present in * the system and also to check it's working status. */ extern "C" void CudaDeviceInit(){ int cudaDevCnt; cudaGetDeviceCount(&cudaDevCnt); if (cudaDevCnt==0){ printf("No CUDA device found, exiting ...\n"); exit(-1); } else{ printf("Number of cuda devices found %d\n",cudaDevCnt); } cudaFree(0); #ifdef __cplusplus cudaDeviceProp devProp; #else struct cudaDeviceProp devProp; #endif int dev; cudaGetDevice(&dev); cudaGetDeviceProperties(&devProp,dev); printf("Using CUDA device %d: %s\n\n",dev,devProp.name); //set cache config to l1 cache // cudaError_t error = cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); // printf("set Cacheconfig error %d\n",error); } ///////////////////////////////////////////////////////////////// /** * this function copies the updated GPU array to GPU */ extern "C" void setInitialData(float* dev,float* host,int NX,int NY,int NZ,float* memoryTimer,int* memoryCtr){ printf("Setting up initial Data ...\n"); cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); //start the timer cudaEventRecord(start,0); cudaMemcpy(dev,host, sizeof(float)*NX*NY*NZ, cudaMemcpyHostToDevice); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime,start,stop); *memoryTimer += elapsedTime; *memoryCtr += 1; cudaEventDestroy(start); cudaEventDestroy(stop); } //////////////////////////////////////////////////////////////// /** * this function invokes the Laplace3d GPU function which * executes the functionality on GPU */ extern "C" void laplace3d_GPU(const int alg_key, Real* uOld, int NX,int NY,int NZ,const int* gridparams, int iter_block, float *compTime, float *commTime) { float taux; Real *aux; int iter; size_t shmsize; int threadperblock = 4;//for shared memory blocksize which is currently static //event timer cudaEvent_t compStart, compStop, commStart, commStop; cudaSafeCall(cudaEventCreate(&commStart)); cudaSafeCall(cudaEventCreate(&commStop)); cudaSafeCall(cudaEventCreate(&compStart)); cudaSafeCall(cudaEventCreate(&compStop)); //start the timer *commTime = 0.0; cudaEventRecord(commStart,0); cudaSafeCall(cudaMemcpy(d_u1, uOld, sizeof(Real)*NX*NY*NZ, cudaMemcpyHostToDevice)); cudaEventRecord(commStop,0); cudaEventSynchronize(commStop); cudaEventElapsedTime(&taux, commStart, commStop); *commTime += taux; dim3 dimGrid(GridX,GridY,GridZ) ; dim3 dimBlock(BlockX, BlockY,BlockZ) ; cudaEventRecord(compStart,0); switch(alg_key) { case(ALG_CUDA_2D_BLK): for (iter = 0; iter < iter_block; ++iter){ kernel_laplace3d_baseline<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2); cudaSafeCall(cudaPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_3D_BLK): for (iter = 0; iter < iter_block; ++iter){ kernel_laplace3d_MarkMawson<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2); cudaSafeCall(cudaPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_SHM): shmsize=BlockX*BlockY*sizeof(Real); for (iter = 0; iter < iter_block; ++iter){ kernel_laplace3d_shm<<<dimGrid, dimBlock, shmsize>>>(NX, NY, NZ, d_u1, d_u2); cudaSafeCall(cudaPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; case(ALG_CUDA_BANDWIDTH): for (iter = 0; iter < iter_block; ++iter){ kernel_BandWidth<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2); cudaSafeCall(cudaPeekAtLastError()); aux=d_u1; d_u1=d_u2; d_u2=aux; } break; } cudaEventRecord(compStop,0); 
cudaEventSynchronize(compStop); cudaEventElapsedTime(compTime,compStart,compStop); cudaEventRecord(commStart,0); // Becase of the above swap d_u1 points to the last iteration data cudaSafeCall(cudaMemcpy(uOld, d_u1, sizeof(Real)*NX*NY*NZ, cudaMemcpyDeviceToHost)); cudaEventRecord(commStop, 0); cudaEventSynchronize(commStop); cudaEventElapsedTime(&taux, commStart, commStop); *commTime += taux; cudaSafeCall(cudaEventDestroy(commStart)); cudaSafeCall(cudaEventDestroy(commStop)); cudaSafeCall(cudaEventDestroy(compStart)); cudaSafeCall(cudaEventDestroy(compStop)); } /** * --function: getUpdatedArray * this function downloads gpu array from GPU and populates the data * from GPU array to CPU array */ extern "C" void getUpdatedArray(float* host,float* dev,int NX,int NY,int NZ,float* memoryTimer,int* memoryCtr) { cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); //start the timer cudaEventRecord(start,0); cudaMemcpy(host, dev, sizeof(float)*NX*NY*NZ, cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime,start,stop); *memoryTimer += elapsedTime; *memoryCtr += 1; cudaEventDestroy(start); cudaEventDestroy(stop); } extern "C" void calcGpuDims(int alg_key, int blockXsize, int blockYsize, int blockZsize,int NX,int NY, int NZ, int* gridsize) { // set threads block sizes and grid sizes. // used 2 dimensions // 0, 1 -> grid x, y // 2,3 -> block x, y switch (alg_key) { case(ALG_CUDA_2D_BLK): GridX = 1 + (NX-1)/blockXsize; GridY = (1 + (NY-1)/blockYsize); //* (1 + (NZ-1) / blockZsize); GridZ = 1; BlockX = blockXsize; BlockY = blockYsize; BlockZ = 1; break; case (ALG_CUDA_SHM) : GridX = 1 + (NX-1)/blockXsize; GridY = 1 + (NY-1)/blockYsize; GridZ = NZ; BlockX = blockXsize + 2; // halo BlockY = blockYsize + 2; BlockZ = 1; break; case(ALG_CUDA_BANDWIDTH): case(ALG_CUDA_3D_BLK): GridX = 1 + (NX-1)/blockXsize; GridY = 1 + (NY-1)/blockYsize; //* (1 + (NZ-1) / blockZsize); GridZ = NZ; BlockX = blockXsize; BlockY = blockYsize; BlockZ = 1; break; default: fprintf(stderr,"unkwon gpu kernel in calcGpuDims, quitting ...!"); exit (1); } // Needs to test if the blocks and grid sizes are meaningful }
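// ---------------------------------------------------------------------------
// The laplace3d driver above times computation and transfers with cudaEvent_t
// pairs and sizes its grid as 1 + (N-1)/block per axis. Below is a minimal
// stand-alone sketch of that same event-timing and grid-sizing pattern; the
// kernel, array size and block size are illustrative assumptions, not taken
// from the file above.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, int n, float factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;             // simple per-element update
}

int main() {
  const int n = 1 << 20;
  float *d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));

  // Same grid-sizing rule as calcGpuDims: enough blocks to cover n elements.
  const int block = 256;
  const int grid  = 1 + (n - 1) / block;

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);
  scale_kernel<<<grid, block>>>(d_data, n, 2.0f);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);               // wait so the elapsed time is valid

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("kernel time: %.3f ms (grid=%d, block=%d)\n", ms, grid, block);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_data);
  return 0;
}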
92bf2ef1a1ff5ef8f0ce2fb0eb7d8953c9e95e30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "../include/gpu_stack.h" #include <thrust/device_vector.h> #define def_dvec(t) thrust::device_vector<t> #define to_ptr(x) thrust::raw_pointer_cast(&x[0]) using namespace std; const int STACK_SIZE = 100; __global__ void test(float *output){ gpu_stack<float, STACK_SIZE> stk; for(int i=1;i<=STACK_SIZE;++i){ stk.push(1.5*i); } int idx = 0, k = 0; while(!stk.empty()){ stk.pop_k(k); if(stk.empty()) return; output[idx] = stk.top(); idx += 1; output[idx] = (float)stk.size(); k += 1; idx += 1; } } int main(){ def_dvec(float) dev_out(40, 0); hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, to_ptr(dev_out)); for(auto k:dev_out) cout<<k<<' '; cout<<endl; return 0; }
92bf2ef1a1ff5ef8f0ce2fb0eb7d8953c9e95e30.cu
#include <iostream> #include "../include/gpu_stack.h" #include <thrust/device_vector.h> #define def_dvec(t) thrust::device_vector<t> #define to_ptr(x) thrust::raw_pointer_cast(&x[0]) using namespace std; const int STACK_SIZE = 100; __global__ void test(float *output){ gpu_stack<float, STACK_SIZE> stk; for(int i=1;i<=STACK_SIZE;++i){ stk.push(1.5*i); } int idx = 0, k = 0; while(!stk.empty()){ stk.pop_k(k); if(stk.empty()) return; output[idx] = stk.top(); idx += 1; output[idx] = (float)stk.size(); k += 1; idx += 1; } } int main(){ def_dvec(float) dev_out(40, 0); test<<<1, 1>>>(to_ptr(dev_out)); for(auto k:dev_out) cout<<k<<' '; cout<<endl; return 0; }
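// ---------------------------------------------------------------------------
// The test above depends on "../include/gpu_stack.h", which is not part of
// this row. As a hedged illustration only, a fixed-capacity per-thread device
// stack with the interface the test appears to expect (push, pop_k, top,
// size, empty) could look like the sketch below; the real gpu_stack may be
// implemented quite differently.
// ---------------------------------------------------------------------------
template <typename T, int CAP>
struct gpu_stack_sketch {
  T   data[CAP];        // storage lives in the calling thread's local memory
  int n;                // current number of elements

  __device__ gpu_stack_sketch() : n(0) {}
  __device__ void push(const T &v)  { if (n < CAP) data[n++] = v; }
  __device__ void pop_k(int k)      { n = (k < n) ? n - k : 0; }   // drop k items
  __device__ T    top() const       { return data[n - 1]; }        // caller checks empty()
  __device__ int  size() const      { return n; }
  __device__ bool empty() const     { return n == 0; }
};

__global__ void stack_demo(float *out) {
  gpu_stack_sketch<float, 8> stk;
  for (int i = 1; i <= 8; ++i) stk.push(1.5f * i);
  stk.pop_k(3);                  // remove the three most recently pushed values
  out[0] = stk.top();            // 1.5f * 5 = 7.5f
  out[1] = (float)stk.size();    // 5
}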
2ac93a1ff904b9900355f40c4b7873a0dacf7030.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021-2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <complex> #include <cstdio> #include <cstdint> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/bias_add_grad_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "include/hip/hip_fp16.h" #include "include/cuda_runtime.h" const int kWarpSize = 32; // tuning param, for those nhw >= kLargeSize, launch more blocks to solve const int kLargeSize = 500000; // tuning param for BiasAddGradNHWC const int kNumBlocks = 8; // tuning param for BiasAddGradNHWC // For NHWC bias add grad, combine dy's NHW together, matrix column reduce. // This is a simple implementation, can be further optimized when C is small. // Firstly, Each warp sums several rows, each thread's partial_sum is the sum of // a part of one cloumn. // Secondly, in order to sum up all values in one column, which is to sum up the partial_sum // in different warps but with the same lane_id, each warp store their partial_sums // to one row of shared mem, and read partial_sums from one col of shared mem. // Then each warp do warp reduce to sum up 32 partial_sums, and write final result to db // For larger NHW, one block is not enough to sum up all rows, needs to launch more blocks. template <typename T> __global__ void BiasAddGradNHWC(const T *dy, T *db, const size_t m, const size_t n, const size_t rows_per_block, size_t rows_per_warp) { __shared__ T shared_d[kWarpSize][kWarpSize + 1]; // avoid bank conflict int shm_row_id = (threadIdx.x >> 5); int shm_col_id = (threadIdx.x % 32); int block_start_row = blockIdx.x * rows_per_block; int block_end_row = block_start_row + rows_per_block; block_end_row = block_end_row < m ? block_end_row : m; int warp_start_row = blockIdx.x * rows_per_block + shm_row_id * rows_per_warp; int warp_end_row = warp_start_row + rows_per_warp; int real_rows_per_warp = warp_end_row < block_end_row ? 
rows_per_warp : block_end_row - warp_start_row; // boundary process // Only the last row or column may not have the full size bool full_tile = true; int tile_width_real = 32; if (blockIdx.y == blockDim.y - 1) { tile_width_real = n - (blockDim.y - 1) * 32; full_tile = (tile_width_real == 32); } int read_offset = warp_start_row * n + (blockIdx.y << 5) + shm_col_id; T partial_sum = ZeroImpl<T>(); if (full_tile) { for (int i = 0; i < real_rows_per_warp; i++) { partial_sum += dy[read_offset]; read_offset += n; } } else { if (shm_col_id < tile_width_real) { for (int i = 0; i < real_rows_per_warp; i++) { partial_sum += dy[read_offset]; read_offset += n; } } } shared_d[shm_row_id][shm_col_id] = partial_sum; __syncthreads(); partial_sum = shared_d[shm_col_id][shm_row_id]; __syncthreads(); for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { partial_sum += shfl_down_sync(0xffffffff, partial_sum, offset); } if (shm_col_id == 0) { if (full_tile) { MsAtomicAdd(db + (blockIdx.y << 5) + shm_row_id, partial_sum); } else { if (shm_row_id < tile_width_real) { MsAtomicAdd(db + (blockIdx.y << 5) + shm_row_id, partial_sum); } } } } template <typename T> __global__ void BiasAddGradNCHW(const size_t size, const int batch, const int bias_size, const int h, const int w, const int bg_size, const T *dy, T *db) { __shared__ T shared_d[32]; for (int i = threadIdx.x; i < 32; i += blockDim.x) { shared_d[i] = ZeroImpl<T>(); } __syncthreads(); T sum = ZeroImpl<T>(); int lane_id = threadIdx.x % 32; int thread_id = threadIdx.x; int img_size = h * w; // N*H*W -> count / bg_size equals the amount of work one block should reduce int count = batch * img_size; int bg_offset = blockIdx.x % bias_size; int bg_id = blockIdx.x / bias_size; for (int i = bg_id * blockDim.x + threadIdx.x; // thread start i < count; i += blockDim.x * bg_size) { int img_offset = i % img_size; int img_id = i / img_size; sum += *(dy + (img_id * bias_size + bg_offset) * img_size + img_offset); } MsAtomicAdd(shared_d + lane_id, sum); __syncthreads(); if (thread_id < 32) { T data = shared_d[thread_id]; for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { data += shfl_xor_sync(0xffffffff, data, offset); } if (thread_id == 0) { MsAtomicAdd(db + bg_offset, data); } } } template <typename T> __global__ void FillDb(T *db, const size_t bias_size) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < bias_size; pos += blockDim.x * gridDim.x) { db[pos] = ZeroImpl<T>(); } } template <typename T> void CalBiasAddGradNCHW(const size_t size, const size_t bias_size, const int height, const int width, const T *dy, T *db, hipStream_t cuda_stream) { int batch_size = size / bias_size / height / width; int block_num = GET_BLOCKS(size); int thread_num = GET_THREADS; // how many blocks to solve one bias's reduce work(N * H * W) int block_group_size = (block_num + bias_size - 1) / bias_size; block_num = block_group_size * bias_size; if (thread_num < kWarpSize) { thread_num = kWarpSize; } hipLaunchKernelGGL(( FillDb), dim3(GET_BLOCKS(bias_size)), dim3(GET_THREADS), 0, cuda_stream, db, bias_size); hipLaunchKernelGGL(( BiasAddGradNCHW), dim3(block_num), dim3(thread_num), 0, cuda_stream, size, batch_size, bias_size, height, width, block_group_size, dy, db); return; } template <typename T> void CalBiasAddGradNHWC(const size_t size, const size_t bias_size, const T *dy, T *db, hipStream_t cuda_stream) { hipLaunchKernelGGL(( FillDb), dim3(GET_BLOCKS(bias_size)), dim3(GET_THREADS), 0, cuda_stream, db, bias_size); size_t rows = size / bias_size; int block_num_x = 
rows <= kLargeSize ? 1 : kNumBlocks; int block_num_y = (bias_size + kWarpSize - 1) / kWarpSize; dim3 grid_size(block_num_x, block_num_y, 1); dim3 block_size(kWarpSize * kWarpSize); size_t rows_per_block = (rows + block_num_x - 1) / block_num_x; size_t rows_per_warp = (rows_per_block + kWarpSize - 1) / kWarpSize; hipLaunchKernelGGL(( BiasAddGradNHWC), dim3(grid_size), dim3(block_size), 0, cuda_stream, dy, db, rows, bias_size, rows_per_block, rows_per_warp); return; } template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<half>(const size_t size, const size_t bias_size, const int height, const int width, const half *dy, half *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<float>(const size_t size, const size_t bias_size, const int height, const int width, const float *dy, float *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<double>(const size_t size, const size_t bias_size, const int height, const int width, const double *dy, double *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int8_t>(const size_t size, const size_t bias_size, const int height, const int width, const int8_t *dy, int8_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int16_t>(const size_t size, const size_t bias_size, const int height, const int width, const int16_t *dy, int16_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int>(const size_t size, const size_t bias_size, const int height, const int width, const int *dy, int *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int64_t>(const size_t size, const size_t bias_size, const int height, const int width, const int64_t *dy, int64_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint8_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint8_t *dy, uint8_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint16_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint16_t *dy, uint16_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint32_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint32_t *dy, uint32_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint64_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint64_t *dy, uint64_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<Complex<float>>(const size_t size, const size_t bias_size, const int height, const int width, const Complex<float> *dy, Complex<float> *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<Complex<double>>(const size_t size, const size_t bias_size, const int height, const int width, const Complex<double> *dy, Complex<double> *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<half>(const size_t size, const size_t bias_size, const half *dy, half *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<float>(const size_t size, const size_t bias_size, const float *dy, float *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<double>(const size_t size, const size_t bias_size, const double *dy, double *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int8_t>(const size_t size, const 
size_t bias_size, const int8_t *dy, int8_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int16_t>(const size_t size, const size_t bias_size, const int16_t *dy, int16_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int>(const size_t size, const size_t bias_size, const int *dy, int *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int64_t>(const size_t size, const size_t bias_size, const int64_t *dy, int64_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint8_t>(const size_t size, const size_t bias_size, const uint8_t *dy, uint8_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint16_t>(const size_t size, const size_t bias_size, const uint16_t *dy, uint16_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint32_t>(const size_t size, const size_t bias_size, const uint32_t *dy, uint32_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint64_t>(const size_t size, const size_t bias_size, const uint64_t *dy, uint64_t *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<Complex<float>>(const size_t size, const size_t bias_size, const Complex<float> *dy, Complex<float> *db, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<Complex<double>>(const size_t size, const size_t bias_size, const Complex<double> *dy, Complex<double> *db, hipStream_t cuda_stream);
2ac93a1ff904b9900355f40c4b7873a0dacf7030.cu
/** * Copyright 2021-2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <complex> #include <cstdio> #include <cstdint> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/bias_add_grad_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "include/cuda_fp16.h" #include "include/cuda_runtime.h" const int kWarpSize = 32; // tuning param, for those nhw >= kLargeSize, launch more blocks to solve const int kLargeSize = 500000; // tuning param for BiasAddGradNHWC const int kNumBlocks = 8; // tuning param for BiasAddGradNHWC // For NHWC bias add grad, combine dy's NHW together, matrix column reduce. // This is a simple implementation, can be further optimized when C is small. // Firstly, Each warp sums several rows, each thread's partial_sum is the sum of // a part of one cloumn. // Secondly, in order to sum up all values in one column, which is to sum up the partial_sum // in different warps but with the same lane_id, each warp store their partial_sums // to one row of shared mem, and read partial_sums from one col of shared mem. // Then each warp do warp reduce to sum up 32 partial_sums, and write final result to db // For larger NHW, one block is not enough to sum up all rows, needs to launch more blocks. template <typename T> __global__ void BiasAddGradNHWC(const T *dy, T *db, const size_t m, const size_t n, const size_t rows_per_block, size_t rows_per_warp) { __shared__ T shared_d[kWarpSize][kWarpSize + 1]; // avoid bank conflict int shm_row_id = (threadIdx.x >> 5); int shm_col_id = (threadIdx.x % 32); int block_start_row = blockIdx.x * rows_per_block; int block_end_row = block_start_row + rows_per_block; block_end_row = block_end_row < m ? block_end_row : m; int warp_start_row = blockIdx.x * rows_per_block + shm_row_id * rows_per_warp; int warp_end_row = warp_start_row + rows_per_warp; int real_rows_per_warp = warp_end_row < block_end_row ? 
rows_per_warp : block_end_row - warp_start_row; // boundary process // Only the last row or column may not have the full size bool full_tile = true; int tile_width_real = 32; if (blockIdx.y == blockDim.y - 1) { tile_width_real = n - (blockDim.y - 1) * 32; full_tile = (tile_width_real == 32); } int read_offset = warp_start_row * n + (blockIdx.y << 5) + shm_col_id; T partial_sum = ZeroImpl<T>(); if (full_tile) { for (int i = 0; i < real_rows_per_warp; i++) { partial_sum += dy[read_offset]; read_offset += n; } } else { if (shm_col_id < tile_width_real) { for (int i = 0; i < real_rows_per_warp; i++) { partial_sum += dy[read_offset]; read_offset += n; } } } shared_d[shm_row_id][shm_col_id] = partial_sum; __syncthreads(); partial_sum = shared_d[shm_col_id][shm_row_id]; __syncthreads(); for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { partial_sum += shfl_down_sync(0xffffffff, partial_sum, offset); } if (shm_col_id == 0) { if (full_tile) { MsAtomicAdd(db + (blockIdx.y << 5) + shm_row_id, partial_sum); } else { if (shm_row_id < tile_width_real) { MsAtomicAdd(db + (blockIdx.y << 5) + shm_row_id, partial_sum); } } } } template <typename T> __global__ void BiasAddGradNCHW(const size_t size, const int batch, const int bias_size, const int h, const int w, const int bg_size, const T *dy, T *db) { __shared__ T shared_d[32]; for (int i = threadIdx.x; i < 32; i += blockDim.x) { shared_d[i] = ZeroImpl<T>(); } __syncthreads(); T sum = ZeroImpl<T>(); int lane_id = threadIdx.x % 32; int thread_id = threadIdx.x; int img_size = h * w; // N*H*W -> count / bg_size equals the amount of work one block should reduce int count = batch * img_size; int bg_offset = blockIdx.x % bias_size; int bg_id = blockIdx.x / bias_size; for (int i = bg_id * blockDim.x + threadIdx.x; // thread start i < count; i += blockDim.x * bg_size) { int img_offset = i % img_size; int img_id = i / img_size; sum += *(dy + (img_id * bias_size + bg_offset) * img_size + img_offset); } MsAtomicAdd(shared_d + lane_id, sum); __syncthreads(); if (thread_id < 32) { T data = shared_d[thread_id]; for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { data += shfl_xor_sync(0xffffffff, data, offset); } if (thread_id == 0) { MsAtomicAdd(db + bg_offset, data); } } } template <typename T> __global__ void FillDb(T *db, const size_t bias_size) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < bias_size; pos += blockDim.x * gridDim.x) { db[pos] = ZeroImpl<T>(); } } template <typename T> void CalBiasAddGradNCHW(const size_t size, const size_t bias_size, const int height, const int width, const T *dy, T *db, cudaStream_t cuda_stream) { int batch_size = size / bias_size / height / width; int block_num = GET_BLOCKS(size); int thread_num = GET_THREADS; // how many blocks to solve one bias's reduce work(N * H * W) int block_group_size = (block_num + bias_size - 1) / bias_size; block_num = block_group_size * bias_size; if (thread_num < kWarpSize) { thread_num = kWarpSize; } FillDb<<<GET_BLOCKS(bias_size), GET_THREADS, 0, cuda_stream>>>(db, bias_size); BiasAddGradNCHW<<<block_num, thread_num, 0, cuda_stream>>>(size, batch_size, bias_size, height, width, block_group_size, dy, db); return; } template <typename T> void CalBiasAddGradNHWC(const size_t size, const size_t bias_size, const T *dy, T *db, cudaStream_t cuda_stream) { FillDb<<<GET_BLOCKS(bias_size), GET_THREADS, 0, cuda_stream>>>(db, bias_size); size_t rows = size / bias_size; int block_num_x = rows <= kLargeSize ? 
1 : kNumBlocks; int block_num_y = (bias_size + kWarpSize - 1) / kWarpSize; dim3 grid_size(block_num_x, block_num_y, 1); dim3 block_size(kWarpSize * kWarpSize); size_t rows_per_block = (rows + block_num_x - 1) / block_num_x; size_t rows_per_warp = (rows_per_block + kWarpSize - 1) / kWarpSize; BiasAddGradNHWC<<<grid_size, block_size, 0, cuda_stream>>>(dy, db, rows, bias_size, rows_per_block, rows_per_warp); return; } template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<half>(const size_t size, const size_t bias_size, const int height, const int width, const half *dy, half *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<float>(const size_t size, const size_t bias_size, const int height, const int width, const float *dy, float *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<double>(const size_t size, const size_t bias_size, const int height, const int width, const double *dy, double *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int8_t>(const size_t size, const size_t bias_size, const int height, const int width, const int8_t *dy, int8_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int16_t>(const size_t size, const size_t bias_size, const int height, const int width, const int16_t *dy, int16_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int>(const size_t size, const size_t bias_size, const int height, const int width, const int *dy, int *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<int64_t>(const size_t size, const size_t bias_size, const int height, const int width, const int64_t *dy, int64_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint8_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint8_t *dy, uint8_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint16_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint16_t *dy, uint16_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint32_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint32_t *dy, uint32_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<uint64_t>(const size_t size, const size_t bias_size, const int height, const int width, const uint64_t *dy, uint64_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<Complex<float>>(const size_t size, const size_t bias_size, const int height, const int width, const Complex<float> *dy, Complex<float> *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNCHW<Complex<double>>(const size_t size, const size_t bias_size, const int height, const int width, const Complex<double> *dy, Complex<double> *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<half>(const size_t size, const size_t bias_size, const half *dy, half *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<float>(const size_t size, const size_t bias_size, const float *dy, float *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<double>(const size_t size, const size_t bias_size, const double *dy, double *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int8_t>(const size_t size, const size_t bias_size, const int8_t *dy, 
int8_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int16_t>(const size_t size, const size_t bias_size, const int16_t *dy, int16_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int>(const size_t size, const size_t bias_size, const int *dy, int *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<int64_t>(const size_t size, const size_t bias_size, const int64_t *dy, int64_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint8_t>(const size_t size, const size_t bias_size, const uint8_t *dy, uint8_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint16_t>(const size_t size, const size_t bias_size, const uint16_t *dy, uint16_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint32_t>(const size_t size, const size_t bias_size, const uint32_t *dy, uint32_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<uint64_t>(const size_t size, const size_t bias_size, const uint64_t *dy, uint64_t *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<Complex<float>>(const size_t size, const size_t bias_size, const Complex<float> *dy, Complex<float> *db, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalBiasAddGradNHWC<Complex<double>>(const size_t size, const size_t bias_size, const Complex<double> *dy, Complex<double> *db, cudaStream_t cuda_stream);
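// ---------------------------------------------------------------------------
// Both bias-add-grad kernels above finish their column/channel sums with a
// warp-level reduction (the shfl_down_sync / shfl_xor_sync wrappers from
// util.cuh). The stand-alone sketch below shows the underlying pattern with
// the raw CUDA intrinsic __shfl_down_sync; names and sizes here are
// illustrative and not taken from the MindSpore sources.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_sum_demo(const float *in, float *out) {
  // One warp: each lane loads one value, then lanes fold partial sums
  // together until lane 0 holds the total.
  float v = in[threadIdx.x];
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_down_sync(0xffffffffu, v, offset);
  if (threadIdx.x == 0) out[0] = v;
}

int main() {
  float h_in[32], h_out = 0.0f;
  for (int i = 0; i < 32; ++i) h_in[i] = 1.0f;      // expected sum: 32
  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  warp_sum_demo<<<1, 32>>>(d_in, d_out);
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("warp sum = %f\n", h_out);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}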
1ba4ca8844d5b8d9d465174ee0b750a945a308c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <iostream> #include "gpu-new-forward.h" #define TILE_WIDTH 16 const int constMemSize = 16*4*7*7; //M*C*K*K __constant__ float Kc[constMemSize];//filter-bank __global__ void shared_mem_kernel(float *y, const float *x, const int B, const int M, const int C, const int H, const int W, const int K) { __shared__ float X_shared[TILE_WIDTH + 6][TILE_WIDTH + 6]; const int H_out = H - K + 1; const int W_out = W - K + 1; #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define k4d(i3, i2, i1, i0) Kc[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] // Insert your GPU convolution kernel code here int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int n, m, h, w, c, p, q; n = blockIdx.x; //current channel m = blockIdx.y; //current ouput feature map h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x; X_shared[threadIdx.y][threadIdx.x]= x4d(n, c, h+threadIdx.y, w+threadIdx.x); __syncthreads(); float acc = 0.; if(h < H_out && w < W_out && threadIdx.x < TILE_WIDTH && threadIdx.y < TILE_WIDTH){ for(c=0; c<C; c++) { for(p=0; p<K; p++) { for(q=0; q<K; q++) { acc+= X_shared[threadIdx.y+p][threadIdx.x+q] * k4d(m, c, p, q); } } } y4d(n, m, h, w) = acc; } #undef y4d #undef x4d #undef k4d } __global__ void combined_unroll_mm_kernel(float *y, float *x, float *w, int B, int C, int H, int K, int W, int M) { __shared__ float MaskTile[TILE_WIDTH][TILE_WIDTH]; __shared__ float InputTile[TILE_WIDTH][TILE_WIDTH]; const int H_out = H - K + 1; const int W_out = W - K + 1; #define k4d(i3, i2, i1, i0) w[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] int b = blockIdx.z; int tx = threadIdx.x; int ty = threadIdx.y; int row = blockIdx.y * TILE_WIDTH + ty; int column = blockIdx.x * TILE_WIDTH + tx; int unrollColumn = C*K*K; float acc = 0.0; int num_iterations = ceil(unrollColumn/(1.0*TILE_WIDTH)); for (int i = 0; i < num_iterations; i++) { int lx = i*TILE_WIDTH + tx; int ly = i*TILE_WIDTH + ty; MaskTile[ty][tx] = 0; InputTile[ty][tx] = 0; int W_m = row; int W_c = lx/(K*K); int W_h = (lx%(K*K))/K; int W_w = (lx%(K*K))%K; if ((lx < unrollColumn) && (row < M)){ MaskTile[ty][tx] = k4d(W_m, W_c, W_h, W_w); } else{ MaskTile[ty][tx] = 0; } int X_b = b; int X_c = ly/(K*K); int X_p = (ly%(K*K))/K; int X_q = (ly%(K*K))%K; int X_h = column/W_out; int X_w = column%W_out; if (ly < unrollColumn && column < H_out*W_out){ InputTile[ty][tx] = x4d(X_b, X_c, X_h + X_p, X_w + X_q); } else{ InputTile[ty][tx] = 0; } __syncthreads(); for (int q = 0; q < TILE_WIDTH; q++){ acc += MaskTile[ty][q] * InputTile[q][tx]; } __syncthreads(); } int Y_b = b; int Y_m = row; int Y_h = column / W_out; int Y_w = column % W_out; if (row < M && column < W_out*H_out) y4d(Y_b, Y_m, Y_h, Y_w) = acc; } __host__ void GPUInterface::conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers float* device_y; float* device_x; float* device_k; // Allocate memory and copy over the relevant data structures to the GPU hipMalloc((void**) 
&device_y, B*M*(H-K+1)*(W-K+1)*sizeof(float)); hipMalloc((void**) &device_x, B*C*H*W*sizeof(float)); hipMalloc((void**) &device_k, M*C*K*K*sizeof(float)); std::cout<< "M: "<<M<<"\n"; std::cout<< "C: "<<C<<"\n"; std::cout<< "K: "<<K<<"\n"; std::cout<< "H: "<<H<<"\n"; std::cout<< "W: "<<W<<"\n"; std::cout<< "B: "<<B<<"\n"; get_device_properties(); //copy input to GPU hipMemcpy(device_x, host_x, B*C*H*W*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(device_k, host_k, M*C*K*K*sizeof(float), hipMemcpyHostToDevice); hipMemcpyToSymbol(Kc, host_k, 7*7*4*16*sizeof(float)); // Set the kernel dimensions and call the kernel int W_out = H - K + 1; //output feature map width int H_out = W - K + 1; //output feature map height if (C == 1) { int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int H_grid = ceil(H_out*1.0 / TILE_WIDTH); //# of titls in height int Z = H_grid * W_grid; //total number of tile dim3 blockDim(TILE_WIDTH+6, TILE_WIDTH+6, 1); //thread block size dim3 gridDim(B, M, Z); //batch_size, # of output feature maps, total number of tiles hipLaunchKernelGGL(( shared_mem_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_y, device_x, B, M, C, H, W, K); } else { dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); dim3 gridDim(ceil(H_out*W_out/(1.0*TILE_WIDTH)), ceil(M/(1.0*TILE_WIDTH)), B); hipLaunchKernelGGL(( combined_unroll_mm_kernel), dim3(gridDim), dim3(blockDim), 0, 0, device_y,device_x,device_k,B,C,H,K,W,M); } // Copy the output back to host hipMemcpy(host_y, device_y, B*M*(H-K+1)*(W-K+1)*sizeof(float), hipMemcpyDeviceToHost); // Free device memory hipFree(device_y); hipFree(device_x); hipFree(device_k); // Useful snippet for error checking // hipError_t error = hipGetLastError(); // if(error != hipSuccess) // { // std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl; // exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; hipGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
1ba4ca8844d5b8d9d465174ee0b750a945a308c9.cu
#include <cmath> #include <iostream> #include "gpu-new-forward.h" #define TILE_WIDTH 16 const int constMemSize = 16*4*7*7; //M*C*K*K __constant__ float Kc[constMemSize];//filter-bank __global__ void shared_mem_kernel(float *y, const float *x, const int B, const int M, const int C, const int H, const int W, const int K) { __shared__ float X_shared[TILE_WIDTH + 6][TILE_WIDTH + 6]; const int H_out = H - K + 1; const int W_out = W - K + 1; #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define k4d(i3, i2, i1, i0) Kc[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] // Insert your GPU convolution kernel code here int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int n, m, h, w, c, p, q; n = blockIdx.x; //current channel m = blockIdx.y; //current ouput feature map h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x; X_shared[threadIdx.y][threadIdx.x]= x4d(n, c, h+threadIdx.y, w+threadIdx.x); __syncthreads(); float acc = 0.; if(h < H_out && w < W_out && threadIdx.x < TILE_WIDTH && threadIdx.y < TILE_WIDTH){ for(c=0; c<C; c++) { for(p=0; p<K; p++) { for(q=0; q<K; q++) { acc+= X_shared[threadIdx.y+p][threadIdx.x+q] * k4d(m, c, p, q); } } } y4d(n, m, h, w) = acc; } #undef y4d #undef x4d #undef k4d } __global__ void combined_unroll_mm_kernel(float *y, float *x, float *w, int B, int C, int H, int K, int W, int M) { __shared__ float MaskTile[TILE_WIDTH][TILE_WIDTH]; __shared__ float InputTile[TILE_WIDTH][TILE_WIDTH]; const int H_out = H - K + 1; const int W_out = W - K + 1; #define k4d(i3, i2, i1, i0) w[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] #define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] #define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] int b = blockIdx.z; int tx = threadIdx.x; int ty = threadIdx.y; int row = blockIdx.y * TILE_WIDTH + ty; int column = blockIdx.x * TILE_WIDTH + tx; int unrollColumn = C*K*K; float acc = 0.0; int num_iterations = ceil(unrollColumn/(1.0*TILE_WIDTH)); for (int i = 0; i < num_iterations; i++) { int lx = i*TILE_WIDTH + tx; int ly = i*TILE_WIDTH + ty; MaskTile[ty][tx] = 0; InputTile[ty][tx] = 0; int W_m = row; int W_c = lx/(K*K); int W_h = (lx%(K*K))/K; int W_w = (lx%(K*K))%K; if ((lx < unrollColumn) && (row < M)){ MaskTile[ty][tx] = k4d(W_m, W_c, W_h, W_w); } else{ MaskTile[ty][tx] = 0; } int X_b = b; int X_c = ly/(K*K); int X_p = (ly%(K*K))/K; int X_q = (ly%(K*K))%K; int X_h = column/W_out; int X_w = column%W_out; if (ly < unrollColumn && column < H_out*W_out){ InputTile[ty][tx] = x4d(X_b, X_c, X_h + X_p, X_w + X_q); } else{ InputTile[ty][tx] = 0; } __syncthreads(); for (int q = 0; q < TILE_WIDTH; q++){ acc += MaskTile[ty][q] * InputTile[q][tx]; } __syncthreads(); } int Y_b = b; int Y_m = row; int Y_h = column / W_out; int Y_w = column % W_out; if (row < M && column < W_out*H_out) y4d(Y_b, Y_m, Y_h, Y_w) = acc; } __host__ void GPUInterface::conv_forward_gpu(float *host_y, const float *host_x, const float *host_k, const int B, const int M, const int C, const int H, const int W, const int K) { // Declare relevant device pointers float* device_y; float* device_x; float* device_k; // Allocate memory and copy over the relevant data structures to the GPU cudaMalloc((void**) &device_y, B*M*(H-K+1)*(W-K+1)*sizeof(float)); cudaMalloc((void**) &device_x, 
B*C*H*W*sizeof(float)); cudaMalloc((void**) &device_k, M*C*K*K*sizeof(float)); std::cout<< "M: "<<M<<"\n"; std::cout<< "C: "<<C<<"\n"; std::cout<< "K: "<<K<<"\n"; std::cout<< "H: "<<H<<"\n"; std::cout<< "W: "<<W<<"\n"; std::cout<< "B: "<<B<<"\n"; get_device_properties(); //copy input to GPU cudaMemcpy(device_x, host_x, B*C*H*W*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(device_k, host_k, M*C*K*K*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Kc, host_k, 7*7*4*16*sizeof(float)); // Set the kernel dimensions and call the kernel int W_out = H - K + 1; //output feature map width int H_out = W - K + 1; //output feature map height if (C == 1) { int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width int H_grid = ceil(H_out*1.0 / TILE_WIDTH); //# of titls in height int Z = H_grid * W_grid; //total number of tile dim3 blockDim(TILE_WIDTH+6, TILE_WIDTH+6, 1); //thread block size dim3 gridDim(B, M, Z); //batch_size, # of output feature maps, total number of tiles shared_mem_kernel<<<gridDim, blockDim>>>(device_y, device_x, B, M, C, H, W, K); } else { dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); dim3 gridDim(ceil(H_out*W_out/(1.0*TILE_WIDTH)), ceil(M/(1.0*TILE_WIDTH)), B); combined_unroll_mm_kernel<<<gridDim, blockDim>>>(device_y,device_x,device_k,B,C,H,K,W,M); } // Copy the output back to host cudaMemcpy(host_y, device_y, B*M*(H-K+1)*(W-K+1)*sizeof(float), cudaMemcpyDeviceToHost); // Free device memory cudaFree(device_y); cudaFree(device_x); cudaFree(device_k); // Useful snippet for error checking // cudaError_t error = cudaGetLastError(); // if(error != cudaSuccess) // { // std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl; // exit(-1); // } } __host__ void GPUInterface::get_device_properties() { int deviceCount; cudaGetDeviceCount(&deviceCount); for(int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl; std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl; std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl; std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl; std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl; std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl; std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl; std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl; std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl; } }
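// ---------------------------------------------------------------------------
// The convolution above keeps its filter bank in __constant__ memory
// (cudaMemcpyToSymbol into Kc) and leaves a launch-error check commented out.
// The sketch below isolates those two patterns with a toy 1-D three-tap
// filter; the filter, sizes and names are illustrative assumptions.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__constant__ float c_filter[3];                     // small filter in constant memory

__global__ void filter3_demo(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= 1 && i < n - 1)                          // skip the borders
    out[i] = c_filter[0] * in[i - 1] + c_filter[1] * in[i] + c_filter[2] * in[i + 1];
}

int main() {
  const int n = 1024;
  const float h_filter[3] = {0.25f, 0.5f, 0.25f};
  cudaMemcpyToSymbol(c_filter, h_filter, sizeof(h_filter));

  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));

  filter3_demo<<<(n + 255) / 256, 256>>>(d_in, d_out, n);

  // The kind of launch-error check the original leaves commented out.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("CUDA error: %s\n", cudaGetErrorString(err));

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}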
f3604ec8f3c755cc3d63aaeb8e8b539ce4dde041.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // __constant__ int opDat0_res_calc_stride_OP2CONSTANT; int opDat0_res_calc_stride_OP2HOST=-1; __constant__ int opDat2_res_calc_stride_OP2CONSTANT; int opDat2_res_calc_stride_OP2HOST=-1; //user function __device__ void res_calc_gpu( const double *x1, const double *x2, const double *q1, const double *q2, const double *adt1, const double *adt2, double *res1, double *res2) { double dx, dy, mu, ri, p1, vol1, p2, vol2, f; dx = x1[(0)*opDat0_res_calc_stride_OP2CONSTANT] - x2[(0)*opDat0_res_calc_stride_OP2CONSTANT]; dy = x1[(1)*opDat0_res_calc_stride_OP2CONSTANT] - x2[(1)*opDat0_res_calc_stride_OP2CONSTANT]; ri = 1.0f / q1[(0)*opDat2_res_calc_stride_OP2CONSTANT]; p1 = gm1_cuda * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] - 0.5f * ri * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] * q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] + q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] * q1[(2)*opDat2_res_calc_stride_OP2CONSTANT])); vol1 = ri * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] * dy - q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] * dx); ri = 1.0f / q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]; p2 = gm1_cuda * (q2[(3)*opDat2_res_calc_stride_OP2CONSTANT] - 0.5f * ri * (q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] * q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] + q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] * q2[(2)*opDat2_res_calc_stride_OP2CONSTANT])); vol2 = ri * (q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] * dy - q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] * dx); mu = 0.5f * ((*adt1) + (*adt2)) * eps_cuda; f = 0.5f * (vol1 * q1[(0)*opDat2_res_calc_stride_OP2CONSTANT] + vol2 * q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]) + mu * (q1[(0)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]); res1[0] += f; res2[0] -= f; f = 0.5f * (vol1 * q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] + p1 * dy + vol2 * q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] + p2 * dy) + mu * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(1)*opDat2_res_calc_stride_OP2CONSTANT]); res1[1] += f; res2[1] -= f; f = 0.5f * (vol1 * q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] - p1 * dx + vol2 * q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] - p2 * dx) + mu * (q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(2)*opDat2_res_calc_stride_OP2CONSTANT]); res1[2] += f; res2[2] -= f; f = 0.5f * (vol1 * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] + p1) + vol2 * (q2[(3)*opDat2_res_calc_stride_OP2CONSTANT] + p2)) + mu * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(3)*opDat2_res_calc_stride_OP2CONSTANT]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const double *__restrict ind_arg0, const double *__restrict ind_arg1, const double *__restrict ind_arg2, double *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables double arg6_l[4]; for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_double; } double arg7_l[4]; for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_double; } int map0idx; int map1idx; int map2idx; int map3idx; map0idx = opDat0Map[n + set_size * 0]; map1idx = opDat0Map[n + set_size * 1]; map2idx = opDat2Map[n + set_size * 0]; map3idx = opDat2Map[n + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg1+map2idx, ind_arg1+map3idx, ind_arg2+map2idx*1, 
ind_arg2+map3idx*1, arg6_l, arg7_l); atomicAdd(&ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[0]); atomicAdd(&ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[1]); atomicAdd(&ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[2]); atomicAdd(&ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[3]); atomicAdd(&ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[0]); atomicAdd(&ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[1]); atomicAdd(&ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[2]); atomicAdd(&ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[3]); } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { if ((OP_kernels[2].count==1) || (opDat0_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); hipMemcpyToSymbol(opDat0_res_calc_stride_OP2CONSTANT, &opDat0_res_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[2].count==1) || (opDat2_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg2))) { opDat2_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg2); hipMemcpyToSymbol(opDat2_res_calc_stride_OP2CONSTANT, &opDat2_res_calc_stride_OP2HOST,sizeof(int)); } //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread), 0, 0, (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg4.data_d, (double *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
f3604ec8f3c755cc3d63aaeb8e8b539ce4dde041.cu
// // auto-generated by op2.py // __constant__ int opDat0_res_calc_stride_OP2CONSTANT; int opDat0_res_calc_stride_OP2HOST=-1; __constant__ int opDat2_res_calc_stride_OP2CONSTANT; int opDat2_res_calc_stride_OP2HOST=-1; //user function __device__ void res_calc_gpu( const double *x1, const double *x2, const double *q1, const double *q2, const double *adt1, const double *adt2, double *res1, double *res2) { double dx, dy, mu, ri, p1, vol1, p2, vol2, f; dx = x1[(0)*opDat0_res_calc_stride_OP2CONSTANT] - x2[(0)*opDat0_res_calc_stride_OP2CONSTANT]; dy = x1[(1)*opDat0_res_calc_stride_OP2CONSTANT] - x2[(1)*opDat0_res_calc_stride_OP2CONSTANT]; ri = 1.0f / q1[(0)*opDat2_res_calc_stride_OP2CONSTANT]; p1 = gm1_cuda * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] - 0.5f * ri * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] * q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] + q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] * q1[(2)*opDat2_res_calc_stride_OP2CONSTANT])); vol1 = ri * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] * dy - q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] * dx); ri = 1.0f / q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]; p2 = gm1_cuda * (q2[(3)*opDat2_res_calc_stride_OP2CONSTANT] - 0.5f * ri * (q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] * q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] + q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] * q2[(2)*opDat2_res_calc_stride_OP2CONSTANT])); vol2 = ri * (q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] * dy - q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] * dx); mu = 0.5f * ((*adt1) + (*adt2)) * eps_cuda; f = 0.5f * (vol1 * q1[(0)*opDat2_res_calc_stride_OP2CONSTANT] + vol2 * q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]) + mu * (q1[(0)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(0)*opDat2_res_calc_stride_OP2CONSTANT]); res1[0] += f; res2[0] -= f; f = 0.5f * (vol1 * q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] + p1 * dy + vol2 * q2[(1)*opDat2_res_calc_stride_OP2CONSTANT] + p2 * dy) + mu * (q1[(1)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(1)*opDat2_res_calc_stride_OP2CONSTANT]); res1[1] += f; res2[1] -= f; f = 0.5f * (vol1 * q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] - p1 * dx + vol2 * q2[(2)*opDat2_res_calc_stride_OP2CONSTANT] - p2 * dx) + mu * (q1[(2)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(2)*opDat2_res_calc_stride_OP2CONSTANT]); res1[2] += f; res2[2] -= f; f = 0.5f * (vol1 * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] + p1) + vol2 * (q2[(3)*opDat2_res_calc_stride_OP2CONSTANT] + p2)) + mu * (q1[(3)*opDat2_res_calc_stride_OP2CONSTANT] - q2[(3)*opDat2_res_calc_stride_OP2CONSTANT]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const double *__restrict ind_arg0, const double *__restrict ind_arg1, const double *__restrict ind_arg2, double *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables double arg6_l[4]; for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_double; } double arg7_l[4]; for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_double; } int map0idx; int map1idx; int map2idx; int map3idx; map0idx = opDat0Map[n + set_size * 0]; map1idx = opDat0Map[n + set_size * 1]; map2idx = opDat2Map[n + set_size * 0]; map3idx = opDat2Map[n + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg1+map2idx, ind_arg1+map3idx, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); 
atomicAdd(&ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[0]); atomicAdd(&ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[1]); atomicAdd(&ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[2]); atomicAdd(&ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx],arg6_l[3]); atomicAdd(&ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[0]); atomicAdd(&ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[1]); atomicAdd(&ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[2]); atomicAdd(&ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx],arg7_l[3]); } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { if ((OP_kernels[2].count==1) || (opDat0_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); cudaMemcpyToSymbol(opDat0_res_calc_stride_OP2CONSTANT, &opDat0_res_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[2].count==1) || (opDat2_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg2))) { opDat2_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg2); cudaMemcpyToSymbol(opDat2_res_calc_stride_OP2CONSTANT, &opDat2_res_calc_stride_OP2HOST,sizeof(int)); } //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_grouped(nargs, args, 2); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; op_cuda_res_calc<<<nblocks,nthread>>>( (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg4.data_d, (double *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
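// ---------------------------------------------------------------------------
// The generated res_calc kernel above scatters each edge's contribution into
// its two end nodes with atomicAdd through an indirection map. The sketch
// below shows that scatter pattern on its own; the map, sizes and values are
// made up for illustration and do not come from the OP2 sources.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void edge_scatter(const int *edge2node, const float *edge_val,
                             float *node_sum, int n_edges) {
  int e = blockIdx.x * blockDim.x + threadIdx.x;
  if (e < n_edges) {
    int a = edge2node[2 * e + 0];
    int b = edge2node[2 * e + 1];
    atomicAdd(&node_sum[a],  edge_val[e]);   // add to one end of the edge
    atomicAdd(&node_sum[b], -edge_val[e]);   // subtract from the other end
  }
}

int main() {
  const int n_edges = 3, n_nodes = 4;
  int   h_map[2 * n_edges] = {0, 1, 1, 2, 2, 3};   // chain 0-1-2-3
  float h_val[n_edges]     = {1.0f, 2.0f, 3.0f};

  int *d_map; float *d_val, *d_sum;
  cudaMalloc(&d_map, sizeof(h_map));
  cudaMalloc(&d_val, sizeof(h_val));
  cudaMalloc(&d_sum, n_nodes * sizeof(float));
  cudaMemcpy(d_map, h_map, sizeof(h_map), cudaMemcpyHostToDevice);
  cudaMemcpy(d_val, h_val, sizeof(h_val), cudaMemcpyHostToDevice);
  cudaMemset(d_sum, 0, n_nodes * sizeof(float));

  edge_scatter<<<1, 64>>>(d_map, d_val, d_sum, n_edges);

  float h_sum[n_nodes];
  cudaMemcpy(h_sum, d_sum, sizeof(h_sum), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n_nodes; ++i) printf("node %d: %f\n", i, h_sum[i]);

  cudaFree(d_map);
  cudaFree(d_val);
  cudaFree(d_sum);
  return 0;
}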
08446558078f87ad4fff069c924284860dfab6dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "support_kernels.cu" #include <stdio.h> ////////////////////////////// ////////////////////////////// ////////////////////////////// //Helper functions for leaf-nodes __device__ void compute_monopole(float2 &mass, float2 &posx, float2 &posy, float2 &posz, float4 pos) { mass = ds_accumulate(mass, pos.w); posx = ds_accumulate(posx, pos.w*pos.x); posy = ds_accumulate(posy, pos.w*pos.y); posz = ds_accumulate(posz, pos.w*pos.z); } __device__ void compute_quadropole(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33, float2 &oct_q12, float2 &oct_q13, float2 &oct_q23, float4 pos) { oct_q11 = ds_accumulate(oct_q11, pos.w * pos.x*pos.x); oct_q22 = ds_accumulate(oct_q22, pos.w * pos.y*pos.y); oct_q33 = ds_accumulate(oct_q33, pos.w * pos.z*pos.z); oct_q12 = ds_accumulate(oct_q12, pos.w * pos.x*pos.y); oct_q13 = ds_accumulate(oct_q13, pos.w * pos.y*pos.z); oct_q23 = ds_accumulate(oct_q23, pos.w * pos.z*pos.x); } __device__ void compute_bounds(float3 &r_min, float3 &r_max, float4 pos) { r_min.x = fminf(r_min.x, pos.x); r_min.y = fminf(r_min.y, pos.y); r_min.z = fminf(r_min.z, pos.z); r_max.x = fmaxf(r_max.x, pos.x); r_max.y = fmaxf(r_max.y, pos.y); r_max.z = fmaxf(r_max.z, pos.z); } //Non-leaf node helper functions __device__ void compute_quadropole_node(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33, float2 &oct_q12, float2 &oct_q13, float2 &oct_q23, float4 Q0, float4 Q1) { oct_q11 = ds_accumulate(oct_q11, Q0.x); oct_q22 = ds_accumulate(oct_q22, Q0.y); oct_q33 = ds_accumulate(oct_q33, Q0.z); oct_q12 = ds_accumulate(oct_q12, Q1.x); oct_q13 = ds_accumulate(oct_q13, Q1.y); oct_q23 = ds_accumulate(oct_q23, Q1.z); } __device__ void compute_bounds_node(float3 &r_min, float3 &r_max, float4 node_min, float4 node_max) { r_min.x = fminf(r_min.x, node_min.x); r_min.y = fminf(r_min.y, node_min.y); r_min.z = fminf(r_min.z, node_min.z); r_max.x = fmaxf(r_max.x, node_max.x); r_max.y = fmaxf(r_max.y, node_max.y); r_max.z = fmaxf(r_max.z, node_max.z); } extern "C" __global__ void compute_leaf(const int n_leafs, uint *leafsIdxs, uint2 *node_bodies, real4 *body_pos, real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds, float3 *lowerBounds, float3 *upperBounds, real4 *body_vel) { const uint bid = blockIdx.y * gridDim.x + blockIdx.x; const uint tid = threadIdx.x; const uint id = bid * blockDim.x + tid; volatile __shared__ float3 shmem[256]; volatile float3 *sh_rmin = (float3*)&shmem [ 0]; volatile float3 *sh_rmax = (float3*)&shmem[128]; //Set shared memory to defaults and return if (id >= n_leafs) { sh_rmin[tid].x = +1e10f; sh_rmin[tid].y = +1e10f; sh_rmin[tid].z = +1e10f; sh_rmax[tid].x = -1e10f; sh_rmax[tid].y = -1e10f; sh_rmax[tid].z = -1e10f; return; } //Since nodes are intermixes with non-leafs in the node_bodies array //we get a leaf-id from the leafsIdxs array int nodeID = leafsIdxs[id]; const uint2 bij = node_bodies[nodeID]; const uint firstChild = bij.x & ILEVELMASK; const uint lastChild = bij.y; //TODO maybe have to increase it by 1 //Variables holding properties and intermediate answers float4 p; float2 mass, posx, posy, posz; mass = posx = posy = posz = (float2){0.0f, 0.0f}; float2 oct_q11, oct_q22, oct_q33; float2 oct_q12, oct_q13, oct_q23; oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f}; oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f}; float3 r_min, r_max; r_min = (float3){+1e10f, +1e10f, +1e10f}; r_max = (float3){-1e10f, -1e10f, -1e10f}; //Loop over the children=>particles=>bodys //unroll 
increases register usage #pragma unroll 16 float maxEps = 0.0f; int count=0; for(int i=firstChild; i < lastChild; i++) { p = body_pos[i]; maxEps = fmaxf(body_vel[i].w, maxEps); //Determine the max softening within this leaf count++; compute_monopole(mass, posx, posy, posz, p); compute_quadropole(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, p); compute_bounds(r_min, r_max, p); } float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x, ds_regularise(posz).x, ds_regularise(mass).x}; float im = 1.0f/mon.w; mon.x *= im; mon.y *= im; mon.z *= im; float4 Q0, Q1; Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x, ds_regularise(oct_q33).x, maxEps}; Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x, ds_regularise(oct_q23).x, 0.0f}; //Store the node properties multipole[3*nodeID + 0] = mon; //Monopole multipole[3*nodeID + 1] = Q0; //Quadropole multipole[3*nodeID + 2] = Q1; //Quadropole //TODO why is this required again? nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f}; nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 1.0f}; //4th parameter is set to 1 to indicate this is a leaf //Global domain boundaries using reduction sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; __syncthreads(); //Reduction of the global boundaries of the system if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); if (tid < 32) { sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax); } __syncthreads(); //Store the results if(tid == 0) { //Compiler doesnt allow: volatile float3 = float3 lowerBounds[bid].x = sh_rmin[0].x; lowerBounds[bid].y = sh_rmin[0].y; lowerBounds[bid].z = sh_rmin[0].z; upperBounds[bid].x = sh_rmax[0].x; upperBounds[bid].y = sh_rmax[0].y; upperBounds[bid].z = sh_rmax[0].z; } return; } //Function goes level by level (starting from deepest) and computes //the properties of the non-leaf nodes extern "C" __global__ void compute_non_leaf(const int curLevel, //Level for which we calc uint *leafsIdxs, //Conversion of ids uint *node_level_list, //Contains the start nodes of each lvl uint *n_children, //Reference from node to first child and number of childs real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + tid; const int endNode = node_level_list[curLevel]; const int startNode = node_level_list[curLevel-1]; if(idx >= (endNode-startNode)) return; const int nodeID = leafsIdxs[idx + startNode]; //Get the children info const uint firstChild = n_children[nodeID] & 0x0FFFFFFF; //TODO make this name/define? const uint nChildren = ((n_children[nodeID] & 0xF0000000) >> 28); //TODO make this name/define? 
//Variables float2 mass, posx, posy, posz; mass = posx = posy = posz = (float2){0.0f, 0.0f}; float2 oct_q11, oct_q22, oct_q33; float2 oct_q12, oct_q13, oct_q23; oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f}; oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f}; float3 r_min, r_max; r_min = (float3){+1e10f, +1e10f, +1e10f}; r_max = (float3){-1e10f, -1e10f, -1e10f}; float maxEps = 0; //Process the children (1 to 8) for(int i=firstChild; i < firstChild+nChildren; i++) { //Gogo process this data! float4 tmon = multipole[3*i + 0]; maxEps = fmaxf(multipole[3*i + 1].w, maxEps); compute_monopole(mass, posx, posy, posz, tmon); compute_quadropole_node(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, multipole[3*i + 1], multipole[3*i + 2]); compute_bounds_node(r_min, r_max, nodeLowerBounds[i], nodeUpperBounds[i]); } //Save the bounds nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f}; nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 0.0f}; //4th is set to 0 to indicate a non-leaf //Regularize and store the results float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x, ds_regularise(posz).x, ds_regularise(mass).x}; float im = 1.0f/mon.w; mon.x *= im; mon.y *= im; mon.z *= im; float4 Q0, Q1; Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x, ds_regularise(oct_q33).x, maxEps}; Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x, ds_regularise(oct_q23).x, 0.0f}; multipole[3*nodeID + 0] = mon; //Monopole multipole[3*nodeID + 1] = Q0; //Quadropole1 multipole[3*nodeID + 2] = Q1; //Quadropole2 return; } extern "C" __global__ void compute_scaling(const int node_count, real4 corner, real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds, uint *n_children, uint4 *node_data, float theta, real4 *boxSizeInfo, real4 *boxCenterInfo){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + tid; if(idx >= node_count) return; float4 mon, Q0, Q1; mon = multipole[3*idx + 0]; //Monopole Q0 = multipole[3*idx + 1]; //Quadropole1 Q1 = multipole[3*idx + 2]; //Quadropole2 //Scale the quadropole float im = 1.0f / mon.w; Q0.x = Q0.x*im - mon.x*mon.x; Q0.y = Q0.y*im - mon.y*mon.y; Q0.z = Q0.z*im - mon.z*mon.z; Q1.x = Q1.x*im - mon.x*mon.y; Q1.y = Q1.y*im - mon.y*mon.z; Q1.z = Q1.z*im - mon.x*mon.z; //Switch the y and z parameter real temp = Q1.y; Q1.y = Q1.z; Q1.z = temp; multipole[3*idx + 1] = Q0; //Quadropole1 multipole[3*idx + 2] = Q1; //Quadropole2 float4 r_min, r_max; r_min = nodeLowerBounds[idx]; r_max = nodeUpperBounds[idx]; float3 boxCenter; boxCenter.x = 0.5*(r_min.x + r_max.x); boxCenter.y = 0.5*(r_min.y + r_max.y); boxCenter.z = 0.5*(r_min.z + r_max.z); float3 boxSize = (float3){fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)), fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)), fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z))}; //Calculate distance between center of the box and the center of mass float3 s3 = (float3){(boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z)}; double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z)); //Length of the box, note times 2 since we only computed half the distance before float l = 2*fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z)); //Extra check, shouldnt be necessary // if(l < 0.000001) // l = 0.000001; //Store the box size and opening criteria boxSizeInfo[idx].x = boxSize.x; boxSizeInfo[idx].y = boxSize.y; boxSizeInfo[idx].z = boxSize.z; boxSizeInfo[idx].w 
= __int_as_float(n_children[idx]); boxCenterInfo[idx].x = boxCenter.x; boxCenterInfo[idx].y = boxCenter.y; boxCenterInfo[idx].z = boxCenter.z; #ifdef IMPBH float cellOp = (l/theta) + s; #else //Minimum distance method float cellOp = (l/theta); #endif cellOp = cellOp*cellOp; if(r_max.w > 0) { cellOp = -cellOp; //This is a leaf node } boxCenterInfo[idx].w = cellOp; /* //Determine the size of the node based on the center of mass and the bounds of the node float3 size3 = (float3){fmaxf(fabs(mon.x-r_min.x), fabs(mon.x-r_max.x)), fmaxf(fabs(mon.y-r_min.y), fabs(mon.y-r_max.y)), fmaxf(fabs(mon.z-r_min.z), fabs(mon.z-r_max.z))}; float size = fmaxf(size3.x, fmaxf(size3.y, size3.z)); //Box properties float3 boxCenter; boxCenter.x = 0.5*(r_min.x + r_max.x); boxCenter.y = 0.5*(r_min.y + r_max.y); boxCenter.z = 0.5*(r_min.z + r_max.z); float3 boxSize3 = (float3){fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)), fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)), fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z))}; //Calculate distance between center of the box and the center of mass // float3 s3 = (float3){(boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z)}; // double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z)); //BH: l/theta + s < d float l = fmaxf(boxSize3.x, fmaxf(boxSize3.y, boxSize3.z)); // float cellOp = (l/theta) + s; float cellOp; cellOp = cellOp*cellOp; //Store the box size and opening criteria // cellOpening[idx].x = boxSize.x; // cellOpening[idx].y = boxSize.y; // cellOpening[idx].z = boxSize.z; // cellOpening[idx].w = cellOp; //Store the properties of the node boxCenterInfo[idx].x = boxCenter.x; boxCenterInfo[idx].y = boxCenter.y; boxCenterInfo[idx].z = boxCenter.z; boxCenterInfo[idx].w = __int_as_float(n_children[idx]); boxSize[idx].x = boxSize3.x; boxSize[idx].y = boxSize3.y; boxSize[idx].z = boxSize3.z; if(r_max.w > 0) boxSize[idx].w = -1; //Leaf node else boxSize[idx].w = 1; // non-leaf node //Calculate the key int4 crd; float domain_fac = corner.w; float idomain_fac = 1.0f / domain_fac; crd.x = (int)((mon.x - corner.x) * idomain_fac + 0.5); crd.y = (int)((mon.y - corner.y) * idomain_fac + 0.5); crd.z = (int)((mon.z - corner.z) * idomain_fac + 0.5); uint2 key = get_key(crd); //Use the key to calculate back the position float3 pos; pos.x = crd.x*domain_fac + corner.x; pos.y = crd.y*domain_fac + corner.y; pos.z = crd.z*domain_fac + corner.z; //Adjust size based on the key-based position of the node float ds = fmax(fabs(pos.x - mon.x), max(fabs(pos.y - mon.y), fabs(pos.z - mon.z))); temp = size; size += ds; #ifdef IMPBH //Box size, max size for now size = l; if(l < 0.000001) size = 0.000001; #endif if(r_max.w > 0) { size = -size; //This is a leaf node } //nchildren contains the node to node references //we also need to use node_bodies to get the //leaf-particle references node_data[idx] = (uint4){key.x, key.y, __float_as_int(size), n_children[idx]};*/ // r_min.w = size; // nodeLowerBounds[idx] = r_min; return; } //Modify the references to the fist body and the number of bodys //for the leafs //Also copy the node_data to the group data extern "C" __global__ void copyNodeDataToGroupData(const int n_groups, const int n_nodes, uint4 *node_data, uint4 *group_data, uint2 *node_bodies, int *group_list, real4 *boxCenterInfo, real4 *boxSizeInfo, real4 *groupCenterInfo, real4 *groupSizeInfo){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + 
tid; if(idx >= n_nodes) return; //Copy the data and change the children data //Instead of pointing to child nodes we want it to point to //particles // uint4 nodeData = node_data[idx]; // bool leaf = __int_as_float(nodeData.z) <= 0; float temp = boxCenterInfo[idx].w; bool leaf = temp <= 0; //uint2 bij2 = node_bodies[idx]; //uint pfirst2 = bij2.x & ILEVELMASK; //uint nchild2 = bij2.y - pfirst2; //Change the indirections of the leaf nodes so they point to //the particle data if(leaf) { uint2 bij = node_bodies[idx]; uint pfirst = bij.x & ILEVELMASK; uint nchild = bij.y - pfirst; pfirst = pfirst | ((nchild-1) << LEAFBIT); boxSizeInfo[idx].w = __int_as_float(pfirst); } //Now fill in the group data if(idx >= n_groups) return; int nodeID = group_list[idx]; real4 nodeData = boxSizeInfo[nodeID]; uint2 bij = node_bodies[nodeID]; int pfirst = bij.x & ILEVELMASK; int nchild = bij.y - pfirst; pfirst = pfirst | (nchild-1) << CRITBIT; nodeData.w = __int_as_float(pfirst); groupSizeInfo[idx] = nodeData; groupCenterInfo[idx] = boxCenterInfo[nodeID]; }
08446558078f87ad4fff069c924284860dfab6dd.cu
#include "support_kernels.cu" #include <stdio.h> ////////////////////////////// ////////////////////////////// ////////////////////////////// //Helper functions for leaf-nodes __device__ void compute_monopole(float2 &mass, float2 &posx, float2 &posy, float2 &posz, float4 pos) { mass = ds_accumulate(mass, pos.w); posx = ds_accumulate(posx, pos.w*pos.x); posy = ds_accumulate(posy, pos.w*pos.y); posz = ds_accumulate(posz, pos.w*pos.z); } __device__ void compute_quadropole(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33, float2 &oct_q12, float2 &oct_q13, float2 &oct_q23, float4 pos) { oct_q11 = ds_accumulate(oct_q11, pos.w * pos.x*pos.x); oct_q22 = ds_accumulate(oct_q22, pos.w * pos.y*pos.y); oct_q33 = ds_accumulate(oct_q33, pos.w * pos.z*pos.z); oct_q12 = ds_accumulate(oct_q12, pos.w * pos.x*pos.y); oct_q13 = ds_accumulate(oct_q13, pos.w * pos.y*pos.z); oct_q23 = ds_accumulate(oct_q23, pos.w * pos.z*pos.x); } __device__ void compute_bounds(float3 &r_min, float3 &r_max, float4 pos) { r_min.x = fminf(r_min.x, pos.x); r_min.y = fminf(r_min.y, pos.y); r_min.z = fminf(r_min.z, pos.z); r_max.x = fmaxf(r_max.x, pos.x); r_max.y = fmaxf(r_max.y, pos.y); r_max.z = fmaxf(r_max.z, pos.z); } //Non-leaf node helper functions __device__ void compute_quadropole_node(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33, float2 &oct_q12, float2 &oct_q13, float2 &oct_q23, float4 Q0, float4 Q1) { oct_q11 = ds_accumulate(oct_q11, Q0.x); oct_q22 = ds_accumulate(oct_q22, Q0.y); oct_q33 = ds_accumulate(oct_q33, Q0.z); oct_q12 = ds_accumulate(oct_q12, Q1.x); oct_q13 = ds_accumulate(oct_q13, Q1.y); oct_q23 = ds_accumulate(oct_q23, Q1.z); } __device__ void compute_bounds_node(float3 &r_min, float3 &r_max, float4 node_min, float4 node_max) { r_min.x = fminf(r_min.x, node_min.x); r_min.y = fminf(r_min.y, node_min.y); r_min.z = fminf(r_min.z, node_min.z); r_max.x = fmaxf(r_max.x, node_max.x); r_max.y = fmaxf(r_max.y, node_max.y); r_max.z = fmaxf(r_max.z, node_max.z); } extern "C" __global__ void compute_leaf(const int n_leafs, uint *leafsIdxs, uint2 *node_bodies, real4 *body_pos, real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds, float3 *lowerBounds, float3 *upperBounds, real4 *body_vel) { const uint bid = blockIdx.y * gridDim.x + blockIdx.x; const uint tid = threadIdx.x; const uint id = bid * blockDim.x + tid; volatile __shared__ float3 shmem[256]; volatile float3 *sh_rmin = (float3*)&shmem [ 0]; volatile float3 *sh_rmax = (float3*)&shmem[128]; //Set shared memory to defaults and return if (id >= n_leafs) { sh_rmin[tid].x = +1e10f; sh_rmin[tid].y = +1e10f; sh_rmin[tid].z = +1e10f; sh_rmax[tid].x = -1e10f; sh_rmax[tid].y = -1e10f; sh_rmax[tid].z = -1e10f; return; } //Since nodes are intermixes with non-leafs in the node_bodies array //we get a leaf-id from the leafsIdxs array int nodeID = leafsIdxs[id]; const uint2 bij = node_bodies[nodeID]; const uint firstChild = bij.x & ILEVELMASK; const uint lastChild = bij.y; //TODO maybe have to increase it by 1 //Variables holding properties and intermediate answers float4 p; float2 mass, posx, posy, posz; mass = posx = posy = posz = (float2){0.0f, 0.0f}; float2 oct_q11, oct_q22, oct_q33; float2 oct_q12, oct_q13, oct_q23; oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f}; oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f}; float3 r_min, r_max; r_min = (float3){+1e10f, +1e10f, +1e10f}; r_max = (float3){-1e10f, -1e10f, -1e10f}; //Loop over the children=>particles=>bodys //unroll increases register usage #pragma unroll 16 float maxEps = 0.0f; int count=0; for(int 
i=firstChild; i < lastChild; i++) { p = body_pos[i]; maxEps = fmaxf(body_vel[i].w, maxEps); //Determine the max softening within this leaf count++; compute_monopole(mass, posx, posy, posz, p); compute_quadropole(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, p); compute_bounds(r_min, r_max, p); } float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x, ds_regularise(posz).x, ds_regularise(mass).x}; float im = 1.0f/mon.w; mon.x *= im; mon.y *= im; mon.z *= im; float4 Q0, Q1; Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x, ds_regularise(oct_q33).x, maxEps}; Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x, ds_regularise(oct_q23).x, 0.0f}; //Store the node properties multipole[3*nodeID + 0] = mon; //Monopole multipole[3*nodeID + 1] = Q0; //Quadropole multipole[3*nodeID + 2] = Q1; //Quadropole //TODO why is this required again? nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f}; nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 1.0f}; //4th parameter is set to 1 to indicate this is a leaf //Global domain boundaries using reduction sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; __syncthreads(); //Reduction of the global boundaries of the system if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); if (tid < 32) { sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax); sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax); } __syncthreads(); //Store the results if(tid == 0) { //Compiler doesnt allow: volatile float3 = float3 lowerBounds[bid].x = sh_rmin[0].x; lowerBounds[bid].y = sh_rmin[0].y; lowerBounds[bid].z = sh_rmin[0].z; upperBounds[bid].x = sh_rmax[0].x; upperBounds[bid].y = sh_rmax[0].y; upperBounds[bid].z = sh_rmax[0].z; } return; } //Function goes level by level (starting from deepest) and computes //the properties of the non-leaf nodes extern "C" __global__ void compute_non_leaf(const int curLevel, //Level for which we calc uint *leafsIdxs, //Conversion of ids uint *node_level_list, //Contains the start nodes of each lvl uint *n_children, //Reference from node to first child and number of childs real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + tid; const int endNode = node_level_list[curLevel]; const int startNode = node_level_list[curLevel-1]; if(idx >= (endNode-startNode)) return; const int nodeID = leafsIdxs[idx + startNode]; //Get the children info const uint firstChild = n_children[nodeID] & 0x0FFFFFFF; //TODO make this name/define? const uint nChildren = ((n_children[nodeID] & 0xF0000000) >> 28); //TODO make this name/define? 
//Variables float2 mass, posx, posy, posz; mass = posx = posy = posz = (float2){0.0f, 0.0f}; float2 oct_q11, oct_q22, oct_q33; float2 oct_q12, oct_q13, oct_q23; oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f}; oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f}; float3 r_min, r_max; r_min = (float3){+1e10f, +1e10f, +1e10f}; r_max = (float3){-1e10f, -1e10f, -1e10f}; float maxEps = 0; //Process the children (1 to 8) for(int i=firstChild; i < firstChild+nChildren; i++) { //Gogo process this data! float4 tmon = multipole[3*i + 0]; maxEps = fmaxf(multipole[3*i + 1].w, maxEps); compute_monopole(mass, posx, posy, posz, tmon); compute_quadropole_node(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, multipole[3*i + 1], multipole[3*i + 2]); compute_bounds_node(r_min, r_max, nodeLowerBounds[i], nodeUpperBounds[i]); } //Save the bounds nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f}; nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 0.0f}; //4th is set to 0 to indicate a non-leaf //Regularize and store the results float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x, ds_regularise(posz).x, ds_regularise(mass).x}; float im = 1.0f/mon.w; mon.x *= im; mon.y *= im; mon.z *= im; float4 Q0, Q1; Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x, ds_regularise(oct_q33).x, maxEps}; Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x, ds_regularise(oct_q23).x, 0.0f}; multipole[3*nodeID + 0] = mon; //Monopole multipole[3*nodeID + 1] = Q0; //Quadropole1 multipole[3*nodeID + 2] = Q1; //Quadropole2 return; } extern "C" __global__ void compute_scaling(const int node_count, real4 corner, real4 *multipole, real4 *nodeLowerBounds, real4 *nodeUpperBounds, uint *n_children, uint4 *node_data, float theta, real4 *boxSizeInfo, real4 *boxCenterInfo){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + tid; if(idx >= node_count) return; float4 mon, Q0, Q1; mon = multipole[3*idx + 0]; //Monopole Q0 = multipole[3*idx + 1]; //Quadropole1 Q1 = multipole[3*idx + 2]; //Quadropole2 //Scale the quadropole float im = 1.0f / mon.w; Q0.x = Q0.x*im - mon.x*mon.x; Q0.y = Q0.y*im - mon.y*mon.y; Q0.z = Q0.z*im - mon.z*mon.z; Q1.x = Q1.x*im - mon.x*mon.y; Q1.y = Q1.y*im - mon.y*mon.z; Q1.z = Q1.z*im - mon.x*mon.z; //Switch the y and z parameter real temp = Q1.y; Q1.y = Q1.z; Q1.z = temp; multipole[3*idx + 1] = Q0; //Quadropole1 multipole[3*idx + 2] = Q1; //Quadropole2 float4 r_min, r_max; r_min = nodeLowerBounds[idx]; r_max = nodeUpperBounds[idx]; float3 boxCenter; boxCenter.x = 0.5*(r_min.x + r_max.x); boxCenter.y = 0.5*(r_min.y + r_max.y); boxCenter.z = 0.5*(r_min.z + r_max.z); float3 boxSize = (float3){fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)), fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)), fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z))}; //Calculate distance between center of the box and the center of mass float3 s3 = (float3){(boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z)}; double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z)); //Length of the box, note times 2 since we only computed half the distance before float l = 2*fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z)); //Extra check, shouldnt be necessary // if(l < 0.000001) // l = 0.000001; //Store the box size and opening criteria boxSizeInfo[idx].x = boxSize.x; boxSizeInfo[idx].y = boxSize.y; boxSizeInfo[idx].z = boxSize.z; boxSizeInfo[idx].w 
= __int_as_float(n_children[idx]); boxCenterInfo[idx].x = boxCenter.x; boxCenterInfo[idx].y = boxCenter.y; boxCenterInfo[idx].z = boxCenter.z; #ifdef IMPBH float cellOp = (l/theta) + s; #else //Minimum distance method float cellOp = (l/theta); #endif cellOp = cellOp*cellOp; if(r_max.w > 0) { cellOp = -cellOp; //This is a leaf node } boxCenterInfo[idx].w = cellOp; /* //Determine the size of the node based on the center of mass and the bounds of the node float3 size3 = (float3){fmaxf(fabs(mon.x-r_min.x), fabs(mon.x-r_max.x)), fmaxf(fabs(mon.y-r_min.y), fabs(mon.y-r_max.y)), fmaxf(fabs(mon.z-r_min.z), fabs(mon.z-r_max.z))}; float size = fmaxf(size3.x, fmaxf(size3.y, size3.z)); //Box properties float3 boxCenter; boxCenter.x = 0.5*(r_min.x + r_max.x); boxCenter.y = 0.5*(r_min.y + r_max.y); boxCenter.z = 0.5*(r_min.z + r_max.z); float3 boxSize3 = (float3){fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)), fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)), fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z))}; //Calculate distance between center of the box and the center of mass // float3 s3 = (float3){(boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z)}; // double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z)); //BH: l/theta + s < d float l = fmaxf(boxSize3.x, fmaxf(boxSize3.y, boxSize3.z)); // float cellOp = (l/theta) + s; float cellOp; cellOp = cellOp*cellOp; //Store the box size and opening criteria // cellOpening[idx].x = boxSize.x; // cellOpening[idx].y = boxSize.y; // cellOpening[idx].z = boxSize.z; // cellOpening[idx].w = cellOp; //Store the properties of the node boxCenterInfo[idx].x = boxCenter.x; boxCenterInfo[idx].y = boxCenter.y; boxCenterInfo[idx].z = boxCenter.z; boxCenterInfo[idx].w = __int_as_float(n_children[idx]); boxSize[idx].x = boxSize3.x; boxSize[idx].y = boxSize3.y; boxSize[idx].z = boxSize3.z; if(r_max.w > 0) boxSize[idx].w = -1; //Leaf node else boxSize[idx].w = 1; // non-leaf node //Calculate the key int4 crd; float domain_fac = corner.w; float idomain_fac = 1.0f / domain_fac; crd.x = (int)((mon.x - corner.x) * idomain_fac + 0.5); crd.y = (int)((mon.y - corner.y) * idomain_fac + 0.5); crd.z = (int)((mon.z - corner.z) * idomain_fac + 0.5); uint2 key = get_key(crd); //Use the key to calculate back the position float3 pos; pos.x = crd.x*domain_fac + corner.x; pos.y = crd.y*domain_fac + corner.y; pos.z = crd.z*domain_fac + corner.z; //Adjust size based on the key-based position of the node float ds = fmax(fabs(pos.x - mon.x), max(fabs(pos.y - mon.y), fabs(pos.z - mon.z))); temp = size; size += ds; #ifdef IMPBH //Box size, max size for now size = l; if(l < 0.000001) size = 0.000001; #endif if(r_max.w > 0) { size = -size; //This is a leaf node } //nchildren contains the node to node references //we also need to use node_bodies to get the //leaf-particle references node_data[idx] = (uint4){key.x, key.y, __float_as_int(size), n_children[idx]};*/ // r_min.w = size; // nodeLowerBounds[idx] = r_min; return; } //Modify the references to the fist body and the number of bodys //for the leafs //Also copy the node_data to the group data extern "C" __global__ void copyNodeDataToGroupData(const int n_groups, const int n_nodes, uint4 *node_data, uint4 *group_data, uint2 *node_bodies, int *group_list, real4 *boxCenterInfo, real4 *boxSizeInfo, real4 *groupCenterInfo, real4 *groupSizeInfo){ const int bid = blockIdx.y * gridDim.x + blockIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int idx = bid * (blockDim.x * blockDim.y) + 
tid; if(idx >= n_nodes) return; //Copy the data and change the children data //Instead of pointing to child nodes we want it to point to //particles // uint4 nodeData = node_data[idx]; // bool leaf = __int_as_float(nodeData.z) <= 0; float temp = boxCenterInfo[idx].w; bool leaf = temp <= 0; //uint2 bij2 = node_bodies[idx]; //uint pfirst2 = bij2.x & ILEVELMASK; //uint nchild2 = bij2.y - pfirst2; //Change the indirections of the leaf nodes so they point to //the particle data if(leaf) { uint2 bij = node_bodies[idx]; uint pfirst = bij.x & ILEVELMASK; uint nchild = bij.y - pfirst; pfirst = pfirst | ((nchild-1) << LEAFBIT); boxSizeInfo[idx].w = __int_as_float(pfirst); } //Now fill in the group data if(idx >= n_groups) return; int nodeID = group_list[idx]; real4 nodeData = boxSizeInfo[nodeID]; uint2 bij = node_bodies[nodeID]; int pfirst = bij.x & ILEVELMASK; int nchild = bij.y - pfirst; pfirst = pfirst | (nchild-1) << CRITBIT; nodeData.w = __int_as_float(pfirst); groupSizeInfo[idx] = nodeData; groupCenterInfo[idx] = boxCenterInfo[nodeID]; }
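The pair above illustrates the lightest class of hipify edits: the tool prepends its banner comment and an explicit #include "hip/hip_runtime.h" (the CUDA file relies on nvcc's implicit runtime header), while device-side math such as fminf/fmaxf and the ds_* double-single helpers passes through untouched. A minimal before/after sketch of that include change, using a made-up clamp01 helper rather than anything from the file above:

// CUDA original would simply rely on the implicit runtime header (or #include <cuda_runtime.h>).
// hipify output adds:
#include <hip/hip_runtime.h>

// Device intrinsics need no translation.
__device__ float clamp01(float x) { return fminf(fmaxf(x, 0.0f), 1.0f); }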
8eefc80baff1e4d8b8f5d39b1efc09040c23235a.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#include "cuda_scatter.h"

namespace NAMESPACE
{
extern "C" __global__ void ScatterFP16(const __half *features_rw, const int *indices_rw, const int *valid_rw, __half *output_rw,
                                       int spatialShape0, int spatialShape1, int spatialShape2,
                                       int max_voxels, int batch_size, int num_features)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;

    for(int i = idx; i < max_voxels * batch_size; i += stride)
    {
        const int batch_id = i / max_voxels;
        const int voxel_id_per_batch = i % max_voxels;
        if(voxel_id_per_batch>=valid_rw[batch_id]) continue;

        int3 coor = reinterpret_cast<const int3*>(indices_rw)[i];
        int output_vol = spatialShape0 * spatialShape1 * spatialShape2;
        // out shape: (bs, c, x, y, z)
        __half *outPerBatch = output_rw + batch_id * num_features * output_vol;
        int offset = coor.x * spatialShape1 * spatialShape2 + coor.y * spatialShape2 + coor.z;

        for(int j = 0; j < num_features; ++j)
            outPerBatch[j * output_vol + offset] = features_rw[i * num_features + j];
    }
}

void cuda_scatter_fp16(const __half *features_rw, const int *indices_rw, const int *valid_rw, __half *output_rw,
                       std::vector<int> spatialShape_rw, int max_voxels, int batch_size, int num_features)
{
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the
                     // maximum occupancy for a full device launch

    checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, ScatterFP16));
    minGridSize = ::min(minGridSize, DivUp(max_voxels * batch_size, blockSize));

    hipLaunchKernelGGL(( ScatterFP16), dim3(minGridSize), dim3(blockSize), 0, 0, features_rw, indices_rw, valid_rw, output_rw,
                                           spatialShape_rw[0], spatialShape_rw[1], spatialShape_rw[2],
                                           max_voxels, batch_size, num_features);
    hipDeviceSynchronize();
}

} //namespace
8eefc80baff1e4d8b8f5d39b1efc09040c23235a.cu
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include "cuda_scatter.h"

namespace NAMESPACE
{
extern "C" __global__ void ScatterFP16(const __half *features_rw, const int *indices_rw, const int *valid_rw, __half *output_rw,
                                       int spatialShape0, int spatialShape1, int spatialShape2,
                                       int max_voxels, int batch_size, int num_features)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;

    for(int i = idx; i < max_voxels * batch_size; i += stride)
    {
        const int batch_id = i / max_voxels;
        const int voxel_id_per_batch = i % max_voxels;
        if(voxel_id_per_batch>=valid_rw[batch_id]) continue;

        int3 coor = reinterpret_cast<const int3*>(indices_rw)[i];
        int output_vol = spatialShape0 * spatialShape1 * spatialShape2;
        // out shape: (bs, c, x, y, z)
        __half *outPerBatch = output_rw + batch_id * num_features * output_vol;
        int offset = coor.x * spatialShape1 * spatialShape2 + coor.y * spatialShape2 + coor.z;

        for(int j = 0; j < num_features; ++j)
            outPerBatch[j * output_vol + offset] = features_rw[i * num_features + j];
    }
}

void cuda_scatter_fp16(const __half *features_rw, const int *indices_rw, const int *valid_rw, __half *output_rw,
                       std::vector<int> spatialShape_rw, int max_voxels, int batch_size, int num_features)
{
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the
                     // maximum occupancy for a full device launch

    checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, ScatterFP16));
    minGridSize = std::min(minGridSize, DivUp(max_voxels * batch_size, blockSize));

    ScatterFP16<<<minGridSize, blockSize>>>(features_rw, indices_rw, valid_rw, output_rw,
                                            spatialShape_rw[0], spatialShape_rw[1], spatialShape_rw[2],
                                            max_voxels, batch_size, num_features);
    cudaDeviceSynchronize();
}

} //namespace
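A minimal, self-contained sketch (my own example, not part of the dataset) of the two host-side rewrites visible in the scatter pair above: the triple-chevron launch becomes the hipLaunchKernelGGL macro with explicit dim3 grid/block arguments, and cudaOccupancyMaxPotentialBlockSize maps one-to-one onto hipOccupancyMaxPotentialBlockSize.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(int *out, int n, int value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 256;
  int *d_out = nullptr;
  hipMalloc((void **)&d_out, n * sizeof(int));

  int minGridSize = 0, blockSize = 0;
  // was cudaOccupancyMaxPotentialBlockSize in the CUDA version
  hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, fill);

  // CUDA form:  fill<<<(n + blockSize - 1) / blockSize, blockSize>>>(d_out, n, 7);
  // HIP form produced by hipify:
  hipLaunchKernelGGL(fill, dim3((n + blockSize - 1) / blockSize), dim3(blockSize), 0, 0, d_out, n, 7);

  hipDeviceSynchronize();
  hipFree(d_out);
  return 0;
}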
a13e85a09a4728ca047e9c11513f91e268d60844.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
using namespace std;

__global__ void matrix(float *a,float *b,float *c,int N, int offset,int size){
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j <N/size){
    for (int i=0; i<N/size; i++)
      for (int k=0; k<N; k++)
        c[N*i+j+offset] += a[N*i+k] * b[N/size*k+j];
  }
}

int main(int argc, char** argv) {
  int size, rank;
  int gpusize;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  hipGetDeviceCount(&gpusize);
  hipSetDevice(rank % gpusize);
  const int N = 256;
  const int M = 256;
  vector<float> A(N*N);
  vector<float> B(N*N);
  vector<float> C(N*N, 0);
  float *subA, *subB, *subC,*recv;
  subA = (float *)malloc(N*N/size*sizeof(float));
  subB = (float *)malloc(N*N/size*sizeof(float));
  subC = (float *)malloc(N*N/size*sizeof(float));
  recv = (float *)malloc(N*N/size*sizeof(float));
  float *a;
  float *b;
  float *c;
  hipMalloc(&a, N*N/size*sizeof(float));
  hipMalloc(&b, N*N/size*sizeof(float));
  hipMalloc(&c, N*N/size*sizeof(float));
  hipDeviceEnablePeerAccess(rank%gpusize, 0);
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }
  int offset = N/size*rank;
  for (int i=0; i<N/size; i++)
    for (int j=0; j<N; j++)
      subA[N*i+j] = A[N*(i+offset)+j];
  for (int i=0; i<N; i++)
    for (int j=0; j<N/size; j++)
      subB[N/size*i+j] = B[N*i+j+offset];
  hipMemcpy(a,subA,N*N/size*sizeof(float),hipMemcpyHostToDevice);
  hipMemcpy(b,subB,N*N/size*sizeof(float),hipMemcpyHostToDevice);
  int recv_from = (rank + 1) % size;
  int send_to = (rank - 1 + size) % size;
  double comp_time = 0, comm_time = 0;
  for(int irank=0; irank<size; irank++) {
    auto tic = chrono::steady_clock::now();
    offset = N/size*((rank+irank) % size);
    hipLaunchKernelGGL(( matrix), dim3((N/size+M-1)/M),dim3(M), 0, 0, a,b,c,N,offset,size);
    hipDeviceSynchronize();
    auto toc = chrono::steady_clock::now();
    comp_time += chrono::duration<double>(toc - tic).count();
    MPI_Request request[2];
    MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
    MPI_Waitall(2, request, MPI_STATUS_IGNORE);
    for (int i=0; i<N*N/size; i++)
      subB[i] = recv[i];
    hipMemcpy(b,subB,N*N/size*sizeof(float),hipMemcpyHostToDevice);
    tic = chrono::steady_clock::now();
    comm_time += chrono::duration<double>(tic - toc).count();
  }
  hipMemcpy(subC,c,N*N/size*sizeof(float),hipMemcpyDeviceToHost);
  MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
  MPI_Barrier(MPI_COMM_WORLD);
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);
  if(rank==0) {
    double time = comp_time+comm_time;
    printf("N : %d\n",N);
    printf("size : %d\n",size);
    printf("comp : %lf s\n", comp_time);
    printf("comm : %lf s\n", comm_time);
    printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
    printf("error: %lf\n",err/N/N);
  }
  free(subA);
  free(subB);
  free(subC);
  free(recv);
  hipFree(a);
  hipFree(b);
  hipFree(c);
  MPI_Finalize();
}
a13e85a09a4728ca047e9c11513f91e268d60844.cu
#include <mpi.h>
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
using namespace std;

__global__ void matrix(float *a,float *b,float *c,int N, int offset,int size){
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j <N/size){
    for (int i=0; i<N/size; i++)
      for (int k=0; k<N; k++)
        c[N*i+j+offset] += a[N*i+k] * b[N/size*k+j];
  }
}

int main(int argc, char** argv) {
  int size, rank;
  int gpusize;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  cudaGetDeviceCount(&gpusize);
  cudaSetDevice(rank % gpusize);
  const int N = 256;
  const int M = 256;
  vector<float> A(N*N);
  vector<float> B(N*N);
  vector<float> C(N*N, 0);
  float *subA, *subB, *subC,*recv;
  subA = (float *)malloc(N*N/size*sizeof(float));
  subB = (float *)malloc(N*N/size*sizeof(float));
  subC = (float *)malloc(N*N/size*sizeof(float));
  recv = (float *)malloc(N*N/size*sizeof(float));
  float *a;
  float *b;
  float *c;
  cudaMalloc(&a, N*N/size*sizeof(float));
  cudaMalloc(&b, N*N/size*sizeof(float));
  cudaMalloc(&c, N*N/size*sizeof(float));
  cudaDeviceEnablePeerAccess(rank%gpusize, 0);
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }
  int offset = N/size*rank;
  for (int i=0; i<N/size; i++)
    for (int j=0; j<N; j++)
      subA[N*i+j] = A[N*(i+offset)+j];
  for (int i=0; i<N; i++)
    for (int j=0; j<N/size; j++)
      subB[N/size*i+j] = B[N*i+j+offset];
  cudaMemcpy(a,subA,N*N/size*sizeof(float),cudaMemcpyHostToDevice);
  cudaMemcpy(b,subB,N*N/size*sizeof(float),cudaMemcpyHostToDevice);
  int recv_from = (rank + 1) % size;
  int send_to = (rank - 1 + size) % size;
  double comp_time = 0, comm_time = 0;
  for(int irank=0; irank<size; irank++) {
    auto tic = chrono::steady_clock::now();
    offset = N/size*((rank+irank) % size);
    matrix<<<(N/size+M-1)/M,M>>>(a,b,c,N,offset,size);
    cudaDeviceSynchronize();
    auto toc = chrono::steady_clock::now();
    comp_time += chrono::duration<double>(toc - tic).count();
    MPI_Request request[2];
    MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
    MPI_Waitall(2, request, MPI_STATUS_IGNORE);
    for (int i=0; i<N*N/size; i++)
      subB[i] = recv[i];
    cudaMemcpy(b,subB,N*N/size*sizeof(float),cudaMemcpyHostToDevice);
    tic = chrono::steady_clock::now();
    comm_time += chrono::duration<double>(tic - toc).count();
  }
  cudaMemcpy(subC,c,N*N/size*sizeof(float),cudaMemcpyDeviceToHost);
  MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
  MPI_Barrier(MPI_COMM_WORLD);
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);
  if(rank==0) {
    double time = comp_time+comm_time;
    printf("N : %d\n",N);
    printf("size : %d\n",size);
    printf("comp : %lf s\n", comp_time);
    printf("comm : %lf s\n", comm_time);
    printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
    printf("error: %lf\n",err/N/N);
  }
  free(subA);
  free(subB);
  free(subC);
  free(recv);
  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
  MPI_Finalize();
}
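The MPI example above needs no changes to its communication logic; hipify only renames the CUDA runtime entry points. A tiny illustrative fragment (the helper name bind_rank_to_gpu is my own, not taken from the file) of the rank-to-device binding after conversion:

#include <hip/hip_runtime.h>

void bind_rank_to_gpu(int mpi_rank) {
  int gpu_count = 0;
  hipGetDeviceCount(&gpu_count);       // was cudaGetDeviceCount
  hipSetDevice(mpi_rank % gpu_count);  // was cudaSetDevice
}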
2953fb4d5eb523bbe80fba586d4cbe2ea2fc8def.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace asym_clover { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_CLOVER_DIRAC #define DD_CLOVER 2 #include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover) #undef DD_CLOVER #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace asym_clover // declare the dslash events #include <dslash_events.cuh> using namespace asym_clover; #ifdef GPU_CLOVER_DIRAC template <typename sFloat, typename gFloat, typename cFloat> class AsymCloverDslashCuda : public SharedDslashCuda { private: const gFloat *gauge0, *gauge1; const cFloat *clover; const float *cloverNorm; const double a; protected: unsigned int sharedBytesPerThread() const { #if (__COMPUTE_CAPABILITY__ >= 200) if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } #else int reg_size = (typeid(sFloat)==typeid(double2) ? 
sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; #endif } public: AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cFloat *clover, const float *cloverNorm, int cl_stride, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a, const int dagger) : SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), clover(clover), cloverNorm(cloverNorm), a(a) { bindSpinorTex<sFloat>(in, out, x); dslashParam.cl_stride = cl_stride; if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay"); } virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } void apply(const hipStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm, (sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x, (float*)x->Norm(), a); } long long flops() const { int clover_flops = 504; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: // clover flops are done in the interior kernel flops += clover_flops * in->VolumeCB(); break; } return flops; } long long bytes() const { bool isHalf = in->Precision() == sizeof(short) ? true : false; int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: bytes += clover_bytes*in->VolumeCB(); break; } return bytes; } }; #endif // GPU_CLOVER_DIRAC #include <dslash_policy.cuh> void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &a, const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL #ifdef GPU_CLOVER_DIRAC int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i=0;i<4;i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 
0 : commDimPartitioned(i); // switch off comms if override = 0 } void *cloverP, *cloverNormP; QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP); void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { #if (__COMPUTE_CAPABILITY__ >= 130) dslash = new AsymCloverDslashCuda<double2, double2, double2> (out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); regSize = sizeof(double); #else errorQuda("Double precision not supported on this GPU"); #endif } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new AsymCloverDslashCuda<float4, float4, float4> (out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new AsymCloverDslashCuda<short4, short4, short4> (out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); } #ifndef GPU_COMMS DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy); #else DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume(), in->GhostFace(), profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); unbindCloverTex(cloverInv); checkCudaError(); #else errorQuda("Clover dslash has not been built"); #endif } }
2953fb4d5eb523bbe80fba586d4cbe2ea2fc8def.cu
#include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace asym_clover { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_CLOVER_DIRAC #define DD_CLOVER 2 #include <wilson_dslash_def.h> // Wilson Dslash kernels (including clover) #undef DD_CLOVER #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace asym_clover // declare the dslash events #include <dslash_events.cuh> using namespace asym_clover; #ifdef GPU_CLOVER_DIRAC template <typename sFloat, typename gFloat, typename cFloat> class AsymCloverDslashCuda : public SharedDslashCuda { private: const gFloat *gauge0, *gauge1; const cFloat *clover; const float *cloverNorm; const double a; protected: unsigned int sharedBytesPerThread() const { #if (__COMPUTE_CAPABILITY__ >= 200) if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } #else int reg_size = (typeid(sFloat)==typeid(double2) ? 
sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; #endif } public: AsymCloverDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cFloat *clover, const float *cloverNorm, int cl_stride, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double a, const int dagger) : SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), clover(clover), cloverNorm(cloverNorm), a(a) { bindSpinorTex<sFloat>(in, out, x); dslashParam.cl_stride = cl_stride; if (!x) errorQuda("Asymmetric clover dslash only defined for Xpay"); } virtual ~AsymCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); } void apply(const cudaStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); ASYM_DSLASH(asymCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, clover, cloverNorm, (sFloat*)in->V(), (float*)in->Norm(), (sFloat*)x, (float*)x->Norm(), a); } long long flops() const { int clover_flops = 504; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: // clover flops are done in the interior kernel flops += clover_flops * in->VolumeCB(); break; } return flops; } long long bytes() const { bool isHalf = in->Precision() == sizeof(short) ? true : false; int clover_bytes = 72 * in->Precision() + (isHalf ? 2*sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: bytes += clover_bytes*in->VolumeCB(); break; } return bytes; } }; #endif // GPU_CLOVER_DIRAC #include <dslash_policy.cuh> void asymCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover cloverInv, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &a, const int *commOverride, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL #ifdef GPU_CLOVER_DIRAC int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i=0;i<4;i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 
0 : commDimPartitioned(i); // switch off comms if override = 0 } void *cloverP, *cloverNormP; QudaPrecision clover_prec = bindCloverTex(cloverInv, parity, &cloverP, &cloverNormP); void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { #if (__COMPUTE_CAPABILITY__ >= 130) dslash = new AsymCloverDslashCuda<double2, double2, double2> (out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), (double2*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); regSize = sizeof(double); #else errorQuda("Double precision not supported on this GPU"); #endif } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new AsymCloverDslashCuda<float4, float4, float4> (out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), (float4*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new AsymCloverDslashCuda<short4, short4, short4> (out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), (short4*)cloverP, (float*)cloverNormP, cloverInv.stride, in, x, a, dagger); } #ifndef GPU_COMMS DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy); #else DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume(), in->GhostFace(), profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); unbindCloverTex(cloverInv); checkCudaError(); #else errorQuda("Clover dslash has not been built"); #endif } }
f6cf232dfc195d684d8a8d7f95752ef3f4c3f2d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "includes.h"

namespace ann
{
// CUDA2
}

__global__ void kernel_weight_update(
    int layer_id,
    int *l,
    int *s,
    int *sw,
    float *z_arr,
    float *a_arr,
    float *t_arr,
    float *gjl,
    float *w_arr,
    float *dw_arr,
    float eta,
    float alpha
){
  volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
  int neuron_count = l[layer_id];
  int neuron_count_next = l[layer_id+1];
  if(idx >= neuron_count) return;
  float a = a_arr[s[layer_id] + idx];
  for(int k = 0; k < neuron_count_next-1; k++){
    float grad=/*a_arr[s[layer_id] + idx]*/a*gjl[s[layer_id + 1] + k];
    dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]=
        -eta*grad+
        alpha*dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
    w_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]+=
        dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
  }
}
f6cf232dfc195d684d8a8d7f95752ef3f4c3f2d2.cu
#include "includes.h"

namespace ann
{
// CUDA2
}

__global__ void kernel_weight_update(
    int layer_id,
    int *l,
    int *s,
    int *sw,
    float *z_arr,
    float *a_arr,
    float *t_arr,
    float *gjl,
    float *w_arr,
    float *dw_arr,
    float eta,
    float alpha
){
  volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
  int neuron_count = l[layer_id];
  int neuron_count_next = l[layer_id+1];
  if(idx >= neuron_count) return;
  float a = a_arr[s[layer_id] + idx];
  for(int k = 0; k < neuron_count_next-1; k++){
    float grad=/*a_arr[s[layer_id] + idx]*/a*gjl[s[layer_id + 1] + k];
    dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]=
        -eta*grad+
        alpha*dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
    w_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]+=
        dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
  }
}
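This pair is the degenerate case: kernel_weight_update contains only device code and no runtime API calls, so the hipified file differs from the CUDA file solely by the generated banner comment. A sketch (my own toy kernel, not from the dataset) of why such code ports verbatim:

#include <hip/hip_runtime.h>

// A pure __global__ kernel using only built-in index variables is already
// valid HIP C++, so hipify has nothing to rewrite in the body.
__global__ void axpy(int n, float a, const float *x, float *y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}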
b24f0fe53e25f5888d93c0ca2edd18646a238d3f.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_proj.cu
 *
 * @brief Simple test driver program for Gunrock template.
 */

#include <gunrock/app/proj/proj_app.cu>
#include <gunrock/app/test_base.cuh>

using namespace gunrock;

namespace APP_NAMESPACE = app::proj;

/******************************************************************************
 * Main
 ******************************************************************************/

/**
 * @brief Enclosure to the main function
 */
struct main_struct {
  /**
   * @brief the actual main function, after type switching
   * @tparam VertexT Type of vertex identifier
   * @tparam SizeT Type of graph size, i.e. type of edge identifier
   * @tparam ValueT Type of edge values
   * @param parameters Command line parameters
   * @param v,s,val Place holders for type deduction
   * \return hipError_t error message(s), if any
   */
  template <typename VertexT,  // Use int as the vertex identifier
            typename SizeT,    // Use int as the graph size type
            typename ValueT>   // Use int as the value type
  hipError_t operator()(util::Parameters &parameters, VertexT v, SizeT s,
                        ValueT val) {
    // CLI parameters
    bool quick = parameters.Get<bool>("quick");
    bool quiet = parameters.Get<bool>("quiet");

    typedef typename app::TestGraph<VertexT, SizeT, ValueT,
                                    graph::HAS_EDGE_VALUES | graph::HAS_CSR>
        GraphT;

    hipError_t retval = hipSuccess;
    util::CpuTimer cpu_timer;
    GraphT graph;

    cpu_timer.Start();
    GUARD_CU(graphio::LoadGraph(parameters, graph));
    cpu_timer.Stop();
    parameters.Set("load-time", cpu_timer.ElapsedMillis());

    ValueT *ref_projections;
    if (!quick) {
      ref_projections = new ValueT[graph.nodes * graph.nodes];

      util::PrintMsg("__________________________", !quiet);

      float elapsed =
          app::proj::CPU_Reference(graph.csr(), ref_projections, quiet);

      util::PrintMsg(
          "--------------------------\n Elapsed: " + std::to_string(elapsed),
          !quiet);
    }

    std::vector<std::string> switches{"advance-mode"};
    GUARD_CU(app::Switch_Parameters(
        parameters, graph, switches,
        [ref_projections](util::Parameters &parameters, GraphT &graph) {
          return app::proj::RunTests(parameters, graph, ref_projections,
                                     util::DEVICE);
        }));

    if (!quick) {
      delete[] ref_projections;
      ref_projections = NULL;
    }
    return retval;
  }
};

int main(int argc, char **argv) {
  hipError_t retval = hipSuccess;
  util::Parameters parameters("test graph_projections");
  GUARD_CU(graphio::UseParameters(parameters));
  GUARD_CU(app::proj::UseParameters(parameters));
  GUARD_CU(app::UseParameters_test(parameters));
  GUARD_CU(parameters.Parse_CommandLine(argc, argv));
  if (parameters.Get<bool>("help")) {
    parameters.Print_Help();
    return hipSuccess;
  }
  GUARD_CU(parameters.Check_Required());

  return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
                           app::SIZET_U32B | app::SIZET_U64B |
                           app::VALUET_F32B | app::DIRECTED>(parameters,
                                                             main_struct());
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
b24f0fe53e25f5888d93c0ca2edd18646a238d3f.cu
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_proj.cu
 *
 * @brief Simple test driver program for Gunrock template.
 */

#include <gunrock/app/proj/proj_app.cu>
#include <gunrock/app/test_base.cuh>

using namespace gunrock;

namespace APP_NAMESPACE = app::proj;

/******************************************************************************
 * Main
 ******************************************************************************/

/**
 * @brief Enclosure to the main function
 */
struct main_struct {
  /**
   * @brief the actual main function, after type switching
   * @tparam VertexT Type of vertex identifier
   * @tparam SizeT Type of graph size, i.e. type of edge identifier
   * @tparam ValueT Type of edge values
   * @param parameters Command line parameters
   * @param v,s,val Place holders for type deduction
   * \return cudaError_t error message(s), if any
   */
  template <typename VertexT,  // Use int as the vertex identifier
            typename SizeT,    // Use int as the graph size type
            typename ValueT>   // Use int as the value type
  cudaError_t operator()(util::Parameters &parameters, VertexT v, SizeT s,
                         ValueT val) {
    // CLI parameters
    bool quick = parameters.Get<bool>("quick");
    bool quiet = parameters.Get<bool>("quiet");

    typedef typename app::TestGraph<VertexT, SizeT, ValueT,
                                    graph::HAS_EDGE_VALUES | graph::HAS_CSR>
        GraphT;

    cudaError_t retval = cudaSuccess;
    util::CpuTimer cpu_timer;
    GraphT graph;

    cpu_timer.Start();
    GUARD_CU(graphio::LoadGraph(parameters, graph));
    cpu_timer.Stop();
    parameters.Set("load-time", cpu_timer.ElapsedMillis());

    ValueT *ref_projections;
    if (!quick) {
      ref_projections = new ValueT[graph.nodes * graph.nodes];

      util::PrintMsg("__________________________", !quiet);

      float elapsed =
          app::proj::CPU_Reference(graph.csr(), ref_projections, quiet);

      util::PrintMsg(
          "--------------------------\n Elapsed: " + std::to_string(elapsed),
          !quiet);
    }

    std::vector<std::string> switches{"advance-mode"};
    GUARD_CU(app::Switch_Parameters(
        parameters, graph, switches,
        [ref_projections](util::Parameters &parameters, GraphT &graph) {
          return app::proj::RunTests(parameters, graph, ref_projections,
                                     util::DEVICE);
        }));

    if (!quick) {
      delete[] ref_projections;
      ref_projections = NULL;
    }
    return retval;
  }
};

int main(int argc, char **argv) {
  cudaError_t retval = cudaSuccess;
  util::Parameters parameters("test graph_projections");
  GUARD_CU(graphio::UseParameters(parameters));
  GUARD_CU(app::proj::UseParameters(parameters));
  GUARD_CU(app::UseParameters_test(parameters));
  GUARD_CU(parameters.Parse_CommandLine(argc, argv));
  if (parameters.Get<bool>("help")) {
    parameters.Print_Help();
    return cudaSuccess;
  }
  GUARD_CU(parameters.Check_Required());

  return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
                           app::SIZET_U32B | app::SIZET_U64B |
                           app::VALUET_F32B | app::DIRECTED>(parameters,
                                                             main_struct());
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
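In the Gunrock driver above the only conversions are type and constant renames: cudaError_t becomes hipError_t and cudaSuccess becomes hipSuccess, so error-handling code keeps its shape. An illustrative guard macro in that style (HIP_GUARD is my own name, not Gunrock's GUARD_CU):

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Check a HIP runtime call and abort with a readable message on failure.
#define HIP_GUARD(call)                                           \
  do {                                                            \
    hipError_t err_ = (call);                                     \
    if (err_ != hipSuccess) {                                     \
      fprintf(stderr, "HIP error %s at %s:%d\n",                  \
              hipGetErrorString(err_), __FILE__, __LINE__);       \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while (0)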
39b160dd3d2e2642d1484737e0e82196a8a48b92.hip
// !!! This is a file automatically generated by hipify!!! //CUDA CODE #include <stdio.h> #include "hip/hip_runtime.h" #include "hiprand/hiprand_kernel.h" #include "device_launch_parameters.h" #include "assert.h" #include "b18TrafficPerson.h" #include "b18EdgeData.h" #include <vector> #include <iostream> #include "../../src/benchmarker.h" #include "sp/config.h" #ifndef ushort #define ushort uint16_t #endif #ifndef uint #define uint uint32_t #endif #ifndef uchar #define uchar uint8_t #endif /////////////////////////////// // CONSTANTS #define MINIMUM_NUMBER_OF_CARS_TO_MEASURE_SPEED 5 __constant__ float intersectionClearance = 7.8f; //TODO(pavan): WHAT IS THIS? #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } inline void printMemoryUsage() { // show memory usage of GPU size_t free_byte; size_t total_byte; hipError_t cuda_status = hipMemGetInfo(&free_byte, &total_byte); if (hipSuccess != cuda_status) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status)); exit(1); } double free_db = (double) free_byte; double total_db = (double) total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %.0f, free = %.0f MB, total = %.0f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); } //////////////////////////////// // VARIABLES LC::B18TrafficPerson *trafficPersonVec_d; uint *indexPathVec_d; uint indexPathVec_d_size; LC::B18EdgeData *edgesData_d; uint edgesData_d_size; uint laneMap_d_size; uint trafficLights_d_size; uint accSpeedPerLinePerTimeInterval_d_size; uint numVehPerLinePerTimeInterval_d_size; __constant__ bool calculatePollution = true; __constant__ float cellSize = 1.0f; uchar *laneMap_d; bool readFirstMapC=true; uint mapToReadShift; uint mapToWriteShift; uint halfLaneMap; float startTime; LC::B18IntersectionData *intersections_d; uchar *trafficLights_d; float* accSpeedPerLinePerTimeInterval_d; float* numVehPerLinePerTimeInterval_d; void b18InitCUDA( bool fistInitialization, std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<uint> &indexPathVec, std::vector<LC::B18EdgeData>& edgesData, std::vector<uchar>& laneMap, std::vector<uchar>& trafficLights, std::vector<LC::B18IntersectionData>& intersections, float startTimeH, float endTimeH, std::vector<float>& accSpeedPerLinePerTimeInterval, std::vector<float>& numVehPerLinePerTimeInterval, float deltaTime) { //printf(">>b18InitCUDA firstInitialization %s\n", (fistInitialization?"INIT":"ALREADY INIT")); //printMemoryUsage(); const uint numStepsPerSample = 30.0f / deltaTime; //each min const uint numStepsTogether = 12; //change also in density (10 per hour) { // people size_t size = trafficPersonVec.size() * sizeof(LC::B18TrafficPerson); if (fistInitialization) gpuErrchk(hipMalloc((void **) &trafficPersonVec_d, size)); // Allocate array on device gpuErrchk(hipMemcpy(trafficPersonVec_d, trafficPersonVec.data(), size, hipMemcpyHostToDevice)); } { // indexPathVec size_t sizeIn = indexPathVec.size() * sizeof(uint); indexPathVec_d_size = indexPathVec.size(); if (fistInitialization) gpuErrchk(hipMalloc((void **) &indexPathVec_d, sizeIn)); // Allocate array on device gpuErrchk(hipMemcpy(indexPathVec_d, indexPathVec.data(), sizeIn, hipMemcpyHostToDevice)); } {//edgeData size_t sizeD = edgesData_d_size * sizeof(LC::B18EdgeData); 
edgesData_d_size = edgesData.size(); if (fistInitialization) gpuErrchk(hipMalloc((void **) &edgesData_d, sizeD)); // Allocate array on device gpuErrchk(hipMemcpy(edgesData_d, edgesData.data(), sizeD, hipMemcpyHostToDevice)); } {//laneMap size_t sizeL = laneMap.size() * sizeof(uchar); laneMap_d_size = laneMap.size(); if (fistInitialization) gpuErrchk(hipMalloc((void **) &laneMap_d, sizeL)); // Allocate array on device gpuErrchk(hipMemcpy(laneMap_d, laneMap.data(), sizeL, hipMemcpyHostToDevice)); halfLaneMap = laneMap.size() / 2; } {// intersections size_t sizeI = intersections.size() * sizeof(LC::B18IntersectionData); if (fistInitialization) gpuErrchk(hipMalloc((void **) &intersections_d, sizeI)); // Allocate array on device gpuErrchk(hipMemcpy(intersections_d, intersections.data(), sizeI, hipMemcpyHostToDevice)); size_t sizeT = trafficLights.size() * sizeof(uchar);//total number of lanes trafficLights_d_size = trafficLights.size(); if (fistInitialization) gpuErrchk(hipMalloc((void **) &trafficLights_d, sizeT)); // Allocate array on device gpuErrchk(hipMemcpy(trafficLights_d, trafficLights.data(), sizeT, hipMemcpyHostToDevice)); } { startTime = startTimeH * 3600.0f; uint numSamples = ceil(((endTimeH*3600.0f - startTimeH*3600.0f) / (deltaTime * numStepsPerSample * numStepsTogether))) + 1; //!!! accSpeedPerLinePerTimeInterval.clear(); numVehPerLinePerTimeInterval.clear(); accSpeedPerLinePerTimeInterval.resize(numSamples * trafficLights.size()); numVehPerLinePerTimeInterval.resize(numSamples * trafficLights.size()); size_t sizeAcc = accSpeedPerLinePerTimeInterval.size() * sizeof(float); if (fistInitialization) gpuErrchk(hipMalloc((void **) &accSpeedPerLinePerTimeInterval_d, sizeAcc)); // Allocate array on device if (fistInitialization) gpuErrchk(hipMalloc((void **) &numVehPerLinePerTimeInterval_d, sizeAcc)); // Allocate array on device gpuErrchk(hipMemset(&accSpeedPerLinePerTimeInterval_d[0], 0, sizeAcc)); gpuErrchk(hipMemset(&numVehPerLinePerTimeInterval_d[0], 0, sizeAcc)); accSpeedPerLinePerTimeInterval_d_size = sizeAcc; numVehPerLinePerTimeInterval_d_size = sizeAcc; } printMemoryUsage(); } void b18updateStructuresCUDA( std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<uint> &indexPathVec, std::vector<LC::B18EdgeData>& edgesData) { std::cout<< ">> b18updateStructuresCUDA" << std::endl; //indexPathVec hipFree(indexPathVec_d); size_t sizeIn = indexPathVec.size() * sizeof(uint); indexPathVec_d_size = indexPathVec.size(); gpuErrchk(hipMalloc((void **) &indexPathVec_d, sizeIn)); gpuErrchk(hipMemcpy(indexPathVec_d, indexPathVec.data(), sizeIn, hipMemcpyHostToDevice)); hipFree(edgesData_d); size_t sizeD = edgesData.size() * sizeof(LC::B18EdgeData); gpuErrchk(hipMalloc((void **) &edgesData_d, sizeD)); gpuErrchk(hipMemcpy(edgesData_d, edgesData.data(), sizeD, hipMemcpyHostToDevice)); hipFree(trafficPersonVec_d); size_t size = trafficPersonVec.size() * sizeof(LC::B18TrafficPerson); gpuErrchk(hipMalloc((void **) &trafficPersonVec_d, size)); gpuErrchk(hipMemcpy(trafficPersonVec_d, trafficPersonVec.data(), size, hipMemcpyHostToDevice)); printMemoryUsage(); } void b18FinishCUDA(void){ hipFree(trafficPersonVec_d); hipFree(indexPathVec_d); hipFree(edgesData_d); hipFree(laneMap_d); hipFree(intersections_d); hipFree(trafficLights_d); hipFree(accSpeedPerLinePerTimeInterval_d); hipFree(numVehPerLinePerTimeInterval_d); } void b18GetDataCUDA(std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<LC::B18EdgeData> &edgesData){ // copy back people size_t size = trafficPersonVec.size() * 
sizeof(LC::B18TrafficPerson); size_t size_edges = edgesData_d_size * sizeof(LC::B18EdgeData); hipMemcpy(trafficPersonVec.data(),trafficPersonVec_d,size,hipMemcpyDeviceToHost);//hipMemcpyHostToDevice hipMemcpy(edgesData.data(),edgesData_d,size_edges,hipMemcpyDeviceToHost);//hipMemcpyHostToDevice } __device__ void calculateGapsLC( uint mapToReadShift, uchar* laneMap, uchar trafficLightState, uint laneToCheck, ushort numLinesEdge, float posInMToCheck, float length, uchar &v_a, uchar &v_b, float &gap_a, float &gap_b, uint laneMap_d_size) { ushort numOfCells = ceil(length); ushort initShift = ceil(posInMToCheck); uchar laneChar; bool found = false; // CHECK FORWARD //printf("initShift %u numOfCells %u\n",initShift,numOfCells); for (ushort b = initShift - 1; (b < numOfCells) && (!found); b++) { //NOTE -1 to make sure there is none in at the same level const uint posToSample = mapToReadShift + kMaxMapWidthM * (laneToCheck + (((int) (b / kMaxMapWidthM)) * numLinesEdge)) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { gap_a = ((float) b - initShift); //m v_a = laneChar; //laneChar is in 3*ms (to save space in array) found = true; break; } } if (!found) { if (trafficLightState == 0x00) { //red //found=true; gap_a = gap_b = 1000.0f; //force to change to the line without vehicle v_a = v_b = 0xFF; return; } } if (!found) { gap_a = 1000.0f; } // CHECK BACKWARDS found = false; //printf("2initShift %u numOfCells %u\n",initShift,numOfCells); for (int b = initShift + 1; (b >= 0) && (!found); b--) { // NOTE +1 to make sure there is none in at the same level //laneChar = laneMap[mapToReadShift + maxWidth * (laneToCheck) + b]; const uint posToSample = mapToReadShift + kMaxMapWidthM * (laneToCheck + (((int) (b / kMaxMapWidthM)) * numLinesEdge)) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { gap_b = ((float) initShift - b); //m v_b = laneChar; //laneChar is in 3*ms (to save space in array) found = true; break; } } //printf("3initShift %u numOfCells %u\n",initShift,numOfCells); if (!found) { gap_b = 1000.0f; } }// __device__ void calculateLaneCarShouldBe( uint curEdgeLane, uint nextEdge, LC::B18IntersectionData* intersections, uint edgeNextInters, ushort edgeNumLanes, ushort &initOKLanes, ushort &endOKLanes) { initOKLanes = 0; endOKLanes = edgeNumLanes; bool currentEdgeFound = false; bool exitFound = false; ushort numExitToTake = 0; ushort numExists = 0; for (int eN = intersections[edgeNextInters].totalInOutEdges - 1; eN >= 0; eN--) { // clockwise uint procEdge = intersections[edgeNextInters].edge[eN]; if ((procEdge & kMaskLaneMap) == curEdgeLane) { //current edge 0xFFFFF currentEdgeFound = true; if (!exitFound) { numExitToTake = 0; } continue; } if ((procEdge & kMaskInEdge) == 0x0) { //out edge 0x800000 numExists++; if (currentEdgeFound) { numExitToTake++; } if (!currentEdgeFound && !exitFound) { numExitToTake++; } } if ((procEdge & kMaskInEdge) == nextEdge) { exitFound = true; currentEdgeFound = false; } } if (edgeNumLanes == 0) { printf("ERRRROR\n"); } switch (edgeNumLanes) { /// ONE LANE case 1: initOKLanes = 0; endOKLanes = 1; break; /// TWO LANE case 2: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 2; break; case 3: if (numExitToTake > 2) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 2; break; default: if (numExitToTake >= numExists - 1) { initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes 
= 2; break; } break; /// THREE LANE case 3: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 3; break; case 3: if (numExitToTake > 2) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 3; break; default: if (numExitToTake >= numExists - 1) { initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 2; break; } break; case 4: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 4; break; case 3: if (numExitToTake == 1) { //right initOKLanes = 3; endOKLanes = 4; } if (numExitToTake > 3) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 4; break; default: if (numExitToTake == 1) { //right initOKLanes = edgeNumLanes - 1; endOKLanes = edgeNumLanes; } if (numExitToTake >= numExists - 2) { initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; //also lane 2 endOKLanes = edgeNumLanes; } break; default: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = edgeNumLanes; break; case 3: if (numExitToTake == 1) { //right initOKLanes = edgeNumLanes - 1; endOKLanes = edgeNumLanes; } if (numExitToTake > edgeNumLanes - 2) { //left initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; endOKLanes = edgeNumLanes; break; default: if (numExitToTake < 2) { //right initOKLanes = edgeNumLanes - 2; endOKLanes = edgeNumLanes; } if (numExitToTake >= numExists - 2) { initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; //also lane 2 endOKLanes = edgeNumLanes - 1; } break; } } __device__ float meters_per_second_to_miles_per_hour(float meters_per_second) { return meters_per_second * 2.2369362920544; } __device__ const float calculateCOStep(float personVelocity) { // Formula comes from the paper "Designing Large-Scale Interactive Traffic Animations for Urban Modeling" // Section 4.4 Traffic Indicators const float personVelocityMPH = meters_per_second_to_miles_per_hour(personVelocity); return -0.064 + 0.0056 * personVelocityMPH + 0.00026 * (personVelocityMPH - 50.0f) * (personVelocityMPH - 50.0f); } __device__ const float calculateGasConsumption(const float a, const float v) { // Formula comes from the paper "Architecture for Modular Microsimulation of Real Estate Markets and Transportation" // Section 6.3.4 Vehicle energy consumption and pollution estimates formula (9) const float Pea = a > 0.0f ? (0.472f*1.680f*a*a*v) : 0.0f; return 0.666f + 0.072f*(0.269f*v + 0.000672f*(v*v*v) + 0.0171f*(v*v) + 1.680f*a*v + Pea); } // Kernel that executes on the CUDA device __global__ void kernel_trafficSimulation( int numPeople, float currentTime, uint mapToReadShift, uint mapToWriteShift, LC::B18TrafficPerson *trafficPersonVec, uint *indexPathVec, int indexPathVec_d_size, LC::B18EdgeData* edgesData, int edgesData_d_size, uchar *laneMap, int laneMap_d_size, LC::B18IntersectionData *intersections, uchar *trafficLights, uint trafficLights_d_size, float deltaTime, const parameters simParameters) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p >= numPeople) return; //CUDA check (inside margins) if (trafficPersonVec[p].active == 2) return; // trip finished if (trafficPersonVec[p].time_departure > currentTime) return; //1.1 just continue waiting assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); if (indexPathVec[trafficPersonVec[p].indexPathCurr] == END_OF_PATH) { trafficPersonVec[p].active = 2; //finished return; } //2.1. 
check if person should still wait or should start if (trafficPersonVec[p].active == 0) { //1.2 find first edge assert(trafficPersonVec[p].indexPathInit != INIT_EDGE_INDEX_NOT_SET); trafficPersonVec[p].indexPathCurr = trafficPersonVec[p].indexPathInit; // reset index. int indexFirstEdge = trafficPersonVec[p].indexPathCurr; assert(indexFirstEdge < indexPathVec_d_size); uint firstEdge = indexPathVec[indexFirstEdge]; trafficPersonVec[p].last_time_simulated = currentTime; if (firstEdge == END_OF_PATH) { trafficPersonVec[p].active = 2; return; } if (firstEdge >= edgesData_d_size) { printf("firstEdge %d is bigger than edgesData size %d\n", firstEdge, edgesData_d_size); } assert(firstEdge < edgesData_d_size); //1.4 try to place it in middle of edge ushort numOfCells = ceil(edgesData[firstEdge].length); ushort initShift = (ushort) (0.5f * numOfCells); //number of cells it should be placed (half of road) uchar laneChar; bool placed = false; ushort numCellsEmptyToBePlaced = simParameters.s_0; ushort countEmptyCells = 0; for (ushort b = initShift; (b < numOfCells) && (!placed); b++) { ushort lN = edgesData[firstEdge].numLines - 1; //just right lane int laneMapPosition = mapToReadShift + kMaxMapWidthM * (firstEdge + lN) + b; assert(laneMapPosition < laneMap_d_size); laneChar = laneMap[laneMapPosition]; //get byte of edge (proper line) if (laneChar != 0xFF) { countEmptyCells = 0; continue; } countEmptyCells++;// ensure there is enough room to place the car if (countEmptyCells < numCellsEmptyToBePlaced) { continue; } trafficPersonVec[p].numOfLaneInEdge = lN; trafficPersonVec[p].posInLaneM = b; //m uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s *3 (to keep more precision int laneMapPosition2 = mapToWriteShift + kMaxMapWidthM * (firstEdge + lN) + b; assert(laneMapPosition2 < laneMap_d_size); laneMap[laneMapPosition2] = vInMpS; placed = true; break; } if (!placed) { //not posible to start now return; } trafficPersonVec[p].v = 0; trafficPersonVec[p].LC_stateofLaneChanging = 0; //1.5 active car trafficPersonVec[p].active = 1; trafficPersonVec[p].isInIntersection = 0; trafficPersonVec[p].num_steps = 1; trafficPersonVec[p].co = 0.0f; trafficPersonVec[p].gas = 0.0f; assert(trafficPersonVec[p].indexPathCurr + 1 < indexPathVec_d_size); if (indexPathVec[trafficPersonVec[p].indexPathCurr + 1] != END_OF_PATH) { trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } trafficPersonVec[p].path_length_gpu = 0; trafficPersonVec[p].prevEdge = firstEdge; return; } // set up next edge info int indexCurrentEdge = trafficPersonVec[p].indexPathCurr; assert(indexCurrentEdge < indexPathVec_d_size); uint currentEdge = indexPathVec[indexCurrentEdge]; assert(currentEdge < edgesData_d_size); int indexNextEdge = trafficPersonVec[p].indexPathCurr + 1; assert(indexNextEdge < indexPathVec_d_size); uint nextEdge = indexPathVec[indexNextEdge]; assert(nextEdge < edgesData_d_size || nextEdge == END_OF_PATH); if (nextEdge != END_OF_PATH) { trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } //2. 
it is moving trafficPersonVec[p].num_steps++; trafficPersonVec[p].last_time_simulated = fmaxf(currentTime, trafficPersonVec[p].last_time_simulated); //2.1 try to move float numMToMove; bool nextVehicleIsATrafficLight = false; //when we're on a new edge for the first time if (currentEdge == trafficPersonVec[p].nextEdge) { trafficPersonVec[p].end_time_on_prev_edge = currentTime - deltaTime; float elapsed_s = (trafficPersonVec[p].end_time_on_prev_edge - trafficPersonVec[p].start_time_on_prev_edge); //multiply by delta_time to get seconds elapsed (not half seconds) // We filter whenever elapsed_s == 0, which means the time granularity was not enough to measure the speed // We also filter whenever 0 > elapsed_s > 5, because it causes manual_v to turn extraordinarily high assert(trafficPersonVec[p].prevEdge < edgesData_d_size); if (elapsed_s > MINIMUM_NUMBER_OF_CARS_TO_MEASURE_SPEED) { trafficPersonVec[p].manual_v = edgesData[trafficPersonVec[p].prevEdge].length / elapsed_s; edgesData[trafficPersonVec[p].prevEdge].curr_iter_num_cars += 1; edgesData[trafficPersonVec[p].prevEdge].curr_cum_vel += trafficPersonVec[p].manual_v; } trafficPersonVec[p].start_time_on_prev_edge = currentTime; trafficPersonVec[p].prevEdge = currentEdge; } trafficPersonVec[p].nextEdge = nextEdge; // www.vwi.tu-dresden.de/~treiber/MicroApplet/IDM.html // IDM float thirdTerm = 0; // 2.1.1 Find front car int numCellsCheck = max(30.0f, trafficPersonVec[p].v * deltaTime * 2); //30 or double of the speed*time // a) SAME LINE (BEFORE SIGNALING) bool found = false; bool noFirstInLaneBeforeSign = false; //use for stop control (just let 1st to pass) TODO(pavan): I DON'T GET THIS bool noFirstInLaneAfterSign = false; //use for stop control (just let 1st to pass) float s; float delta_v; uchar laneChar; ushort byteInLine = (ushort) floor(trafficPersonVec[p].posInLaneM); ushort numOfCells = ceil((edgesData[currentEdge].length - intersectionClearance)); //intersectionClearance hardcoded to 7.8f - why? for (ushort b = byteInLine + 2; (b < numOfCells) && (!found) && (numCellsCheck > 0); b++, numCellsCheck--) { // ShiftRead + WIDTH * (width number * # lanes + # laneInEdge) + b TODO(pavan): WHAT IS THIS? //TODO(pavan): double check what mapToReadShift is printing out assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); const uint posToSample = mapToReadShift + kMaxMapWidthM *(currentEdge + (((int) (byteInLine / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; //TODO(pavan): Is this clause for when it is not at the intersection yet but it has found a car in front of it? 
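// Reader's note on the lane-map encoding used by the scans below: each byte is one ~1 m cell of a lane (cellSize = 1.0f);
// 0xFF marks an empty cell, and any other value is the occupying car's speed stored as (uchar)(v * 3), hence the laneChar / 3.0f decode.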
if (laneChar != 0xFF) { s = ((float) (b - byteInLine)); //m delta_v = trafficPersonVec[p].v - (laneChar / 3.0f); //laneChar is in 3*ms (to save space in array) found = true; noFirstInLaneBeforeSign = true; break; } } // NEXT LINE // e) MOVING ALONG IN THE NEXT EDGE if (!found && numCellsCheck > 0) { //check if in next line if ((nextEdge != END_OF_PATH) && (edgesData[currentEdge].nextIntersMapped != trafficPersonVec[p].end_intersection)) { // we haven't arrived to destination (check next line) ushort nextEdgeLaneToBe = trafficPersonVec[p].numOfLaneInEdge; //same lane //printf("trafficPersonVec[p].numOfLaneInEdge %u\n",trafficPersonVec[p].numOfLaneInEdge); assert(nextEdge < edgesData_d_size); if (nextEdgeLaneToBe >= edgesData[nextEdge].numLines) { nextEdgeLaneToBe = edgesData[nextEdge].numLines - 1; //change line if there are less roads } //printf("2trafficPersonVec[p].numOfLaneInEdge %u\n",trafficPersonVec[p].numOfLaneInEdge); ushort numOfCells = ceil(edgesData[nextEdge].length); for (ushort b = 0; (b < numOfCells) && (!found) && (numCellsCheck > 0); b++, numCellsCheck--) { const uint posToSample = mapToReadShift + kMaxMapWidthM * (nextEdge + nextEdgeLaneToBe) + b; // b18 not changed since we check first width assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { s = ((float) (b)); //m delta_v = trafficPersonVec[p].v - (laneChar / 3.0f); // laneChar is in 3*ms (to save space in array) found = true; break; } } } } float s_star; if (found && delta_v > 0) { //car in front and slower than us // 2.1.2 calculate dv_dt // The following operation is taken from Designing Large-Scale Interactive Traffic Animations for Urban Modeling // Section 4.3.1. Car-Following Model formula (2) s_star = simParameters.s_0 + max(0.0f, (trafficPersonVec[p].v * trafficPersonVec[p].T + (trafficPersonVec[p].v * delta_v) / (2 * sqrtf(trafficPersonVec[p].a * trafficPersonVec[p].b)))); thirdTerm = powf(((s_star) / (s)), 2); } // The following operation is taken from Designing Large-Scale Interactive Traffic Animations for Urban Modeling // Section 4.3.1. 
Car-Following Model formula (1) // And also Architecture for Modular Microsimulation of Real Estate Markets and Transportation // Section 6.3.2 Per-vehicle and traffic control simulation formula (7) float dv_dt = trafficPersonVec[p].a * (1.0f - ::pow(( trafficPersonVec[p].v / edgesData[currentEdge].maxSpeedMperSec), 4) - thirdTerm); // 2.1.3 update values numMToMove = max(0.0f, trafficPersonVec[p].v * deltaTime + 0.5f * (dv_dt) * deltaTime * deltaTime); trafficPersonVec[p].v += dv_dt * deltaTime; if (trafficPersonVec[p].v < 0) { trafficPersonVec[p].v = 0; dv_dt = 0.0f; } trafficPersonVec[p].cum_v += trafficPersonVec[p].v; if (calculatePollution && ((float(currentTime) == int(currentTime)))) { // enabled and each second (assuming deltaTime 0.5f) const float coStep = calculateCOStep(trafficPersonVec[p].v); if (coStep > 0) { trafficPersonVec[p].co += coStep; } trafficPersonVec[p].gas += calculateGasConsumption(dv_dt, trafficPersonVec[p].v); } if (trafficPersonVec[p].v == 0) { //if not moving not do anything else ushort posInLineCells = (ushort) (trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (currentEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneMap[posToSample] = 0; return; } // COLOR trafficPersonVec[p].color = p << 8; // STOP (check if it is a stop if it can go through) trafficPersonVec[p].posInLaneM = trafficPersonVec[p].posInLaneM + numMToMove; //2.2 close to intersection //2.2 check if change intersection if (trafficPersonVec[p].posInLaneM > edgesData[currentEdge].length) { //reach intersection numMToMove = trafficPersonVec[p].posInLaneM - edgesData[currentEdge].length; trafficPersonVec[p].posInLaneM = numMToMove; trafficPersonVec[p].dist_traveled += edgesData[currentEdge].length; trafficPersonVec[p].path_length_gpu++; //2.2.1 find next edge assert(indexCurrentEdge < indexPathVec_d_size); assert(currentEdge < edgesData_d_size); trafficPersonVec[p].LC_stateofLaneChanging = 0; //2.1 check if end if (nextEdge != END_OF_PATH) { assert(nextEdge < edgesData_d_size); if (trafficPersonVec[p].numOfLaneInEdge >= edgesData[nextEdge].numLines) { trafficPersonVec[p].numOfLaneInEdge = edgesData[nextEdge].numLines - 1; //change line if there are less roads } //TODO: Test if the following line is doing the conversion wrong uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s to fit in uchar ushort posInLineCells = (ushort) (trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (nextEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[nextEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; // note the last % should not happen assert(posToSample < laneMap_d_size); laneMap[posToSample] = vInMpS; trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } else { trafficPersonVec[p].active = 2; } trafficPersonVec[p].indexPathCurr++; trafficPersonVec[p].LC_stateofLaneChanging = 0; } else { //does not reach an intersection assert(indexCurrentEdge < indexPathVec_d_size); assert(indexNextEdge < indexPathVec_d_size); assert(currentEdge < edgesData_d_size); assert(nextEdge < edgesData_d_size || nextEdge == END_OF_PATH); // LANE CHANGING (happens when we have not reached the intersection) if (trafficPersonVec[p].v > 3.0f && trafficPersonVec[p].num_steps % 5 == 0) { //at least 10km/h to try to change
lane //just check every (5 steps) 5 seconds // next thing is not a traffic light // skip if there is one lane (avoid to do this) // skip if it is the last edge if (!nextVehicleIsATrafficLight && edgesData[currentEdge].numLines > 1 && nextEdge != END_OF_PATH) { //////////////////////////////////////////////////// // LC 1 update lane changing status if (trafficPersonVec[p].LC_stateofLaneChanging == 0) { // 2.2-exp((x-1)^2) float x = trafficPersonVec[p].posInLaneM / edgesData[currentEdge].length; if (x > 0.4f) { //just after 40% of the road float probabiltyMandatoryState = 2.2 - exp((x - 1) * (x - 1)); //if (((float) qrand() / RAND_MAX) < probabiltyMandatoryState) { if ((((int) (x * 100) % 100) / 100.0f) < probabiltyMandatoryState) { // pseudo random number trafficPersonVec[p].LC_stateofLaneChanging = 1; } } } // LC 2 NOT MANDATORY STATE if (trafficPersonVec[p].LC_stateofLaneChanging == 0) { // discretionary change: v slower than the current road limit and deccelerating and moving if ((trafficPersonVec[p].v < (edgesData[currentEdge].maxSpeedMperSec * 0.7f)) && (dv_dt < 0) && trafficPersonVec[p].v > 3.0f) { bool leftLane = trafficPersonVec[p].numOfLaneInEdge > 0; //at least one lane on the left bool rightLane = trafficPersonVec[p].numOfLaneInEdge < edgesData[currentEdge].numLines - 1; //at least one lane if (leftLane && rightLane) { if (int(trafficPersonVec[p].v) % 2 == 0) { // pseudo random leftLane = false; } else { rightLane = false; } } ushort laneToCheck; if (leftLane) { laneToCheck = trafficPersonVec[p].numOfLaneInEdge - 1; } else { laneToCheck = trafficPersonVec[p].numOfLaneInEdge + 1; } uchar v_a, v_b; float gap_a, gap_b; assert(currentEdge + trafficPersonVec[p].numOfLaneInEdge < trafficLights_d_size); uchar trafficLightState = trafficLights[currentEdge + trafficPersonVec[p].numOfLaneInEdge]; calculateGapsLC(mapToReadShift, laneMap, trafficLightState, currentEdge + laneToCheck, edgesData[currentEdge].numLines, trafficPersonVec[p].posInLaneM, edgesData[currentEdge].length, v_a, v_b, gap_a, gap_b, laneMap_d_size); if (gap_a == 1000.0f && gap_b == 1000.0f) { //lag and lead car very far trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } else { // NOT ALONE float b1A = 0.05f, b2A = 0.15f; float b1B = 0.15f, b2B = 0.40f; // simParameters.s_0-> critical lead gap float g_na_D, g_bn_D; bool acceptLC = true; if (gap_a != 1000.0f) { g_na_D = max(simParameters.s_0, simParameters.s_0 + b1A * trafficPersonVec[p].v + b2A * (trafficPersonVec[p].v - v_a * 3.0f)); if (gap_a < g_na_D) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC && gap_b != 1000.0f) { g_bn_D = max(simParameters.s_0, simParameters.s_0 + b1B * v_b * 3.0f + b2B * (v_b * 3.0f - trafficPersonVec[p].v)); if (gap_b < g_bn_D) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC) { trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } } } }// Discretionary // LC 3 *MANDATORY* STATE if (trafficPersonVec[p].LC_stateofLaneChanging == 1) { // LC 3.1 Calculate the correct lanes if (trafficPersonVec[p].LC_endOKLanes == 0xFF) { calculateLaneCarShouldBe(currentEdge, nextEdge, intersections, edgesData[currentEdge].nextIntersMapped, edgesData[currentEdge].numLines, trafficPersonVec[p].LC_initOKLanes, trafficPersonVec[p].LC_endOKLanes); if (trafficPersonVec[p].LC_initOKLanes == 0 && trafficPersonVec[p].LC_endOKLanes == 0) { } } bool leftLane = false, rightLane = false; // LC 3.2 CORRECT LANES--> DICRETIONARY LC WITHIN if (trafficPersonVec[p].numOfLaneInEdge >= 
trafficPersonVec[p].LC_initOKLanes && trafficPersonVec[p].numOfLaneInEdge < trafficPersonVec[p].LC_endOKLanes) { // for discretionary it should be under some circustances if ((trafficPersonVec[p].v < (edgesData[currentEdge].maxSpeedMperSec * 0.7f)) && (dv_dt < 0) && trafficPersonVec[p].v > 3.0f) { leftLane = (trafficPersonVec[p].numOfLaneInEdge > 0) && //at least one lane on the left (trafficPersonVec[p].numOfLaneInEdge - 1 >= trafficPersonVec[p].LC_initOKLanes) && (trafficPersonVec[p].numOfLaneInEdge - 1 < trafficPersonVec[p].LC_endOKLanes); rightLane = (trafficPersonVec[p].numOfLaneInEdge < edgesData[currentEdge].numLines - 1) && //at least one lane (trafficPersonVec[p].numOfLaneInEdge + 1 >= trafficPersonVec[p].LC_initOKLanes) && (trafficPersonVec[p].numOfLaneInEdge + 1 < trafficPersonVec[p].LC_endOKLanes); } } else { // LC 3.3 INCORRECT LANES--> MANDATORY LC if (trafficPersonVec[p].numOfLaneInEdge < trafficPersonVec[p].LC_initOKLanes) { rightLane = true; } else { leftLane = true; } if (rightLane && trafficPersonVec[p].numOfLaneInEdge + 1 >= edgesData[currentEdge].numLines) { printf("ERROR: RT laneToCheck>=edgeNumLanes\n"); } if (leftLane && trafficPersonVec[p].numOfLaneInEdge == 0) { printf("ERROR %u: LT laneToCheck>=edgeNumLanes OK %u-%u NE %u\n", p, trafficPersonVec[p].LC_initOKLanes, trafficPersonVec[p].LC_endOKLanes, currentEdge); } } if (leftLane || rightLane) { // choose lane (if necessary) if (leftLane && rightLane) { if ((int) (trafficPersonVec[p].posInLaneM) % 2 == 0) { //pseudo random leftLane = false; } else { rightLane = false; } } ushort laneToCheck; if (leftLane) { laneToCheck = trafficPersonVec[p].numOfLaneInEdge - 1; } else { laneToCheck = trafficPersonVec[p].numOfLaneInEdge + 1; } if (laneToCheck >= edgesData[currentEdge].numLines) { printf("ERROR: laneToCheck>=edgesData[currentEdge].numLines %u %u\n", laneToCheck, edgesData[currentEdge].numLines); } uchar v_a, v_b; float gap_a, gap_b; assert(currentEdge + trafficPersonVec[p].numOfLaneInEdge < trafficLights_d_size); uchar trafficLightState = trafficLights[currentEdge + trafficPersonVec[p].numOfLaneInEdge]; calculateGapsLC(mapToReadShift, laneMap, trafficLightState, currentEdge + laneToCheck, edgesData[currentEdge].numLines, trafficPersonVec[p].posInLaneM, edgesData[currentEdge].length, v_a, v_b, gap_a, gap_b, laneMap_d_size); if (gap_a == 1000.0f && gap_b == 1000.0f) { //lag and lead car very far trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } else { // NOT ALONE float b1A = 0.05f, b2A = 0.15f; float b1B = 0.15f, b2B = 0.40f; float gamma = 0.000025; // simParameters.s_0-> critical lead gap float distEnd = edgesData[currentEdge].length - trafficPersonVec[p].posInLaneM; float expTerm = (1 - exp(-gamma * distEnd * distEnd)); float g_na_M, g_bn_M; bool acceptLC = true; if (gap_a != 1000.0f) { g_na_M = max(simParameters.s_0, simParameters.s_0 + (b1A * trafficPersonVec[p].v + b2A * (trafficPersonVec[p].v - v_a * 3.0f))); if (gap_a < g_na_M) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC && gap_b != 1000.0f) { g_bn_M = max(simParameters.s_0, simParameters.s_0 + (b1B * v_b * 3.0f + b2B * (v_b * 3.0f - trafficPersonVec[p].v))); if (gap_b < g_bn_M) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC) { trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } } } }// Mandatory }//at least two lanes and not stopped by traffic light } uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s to fit in uchar ushort posInLineCells = (ushort) 
(trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (currentEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneMap[posToSample] = vInMpS; } } /* __global__ void kernel_intersectionSTOPSimulation( uint numIntersections, float currentTime, LC::B18IntersectionData *intersections, uchar *trafficLights, LC::B18EdgeData* edgesData,//for the length uchar* laneMap,//to check if there are cars uint mapToReadShift) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<numIntersections) {//CUDA check (inside margins) const float deltaEvent = 0.0f; //if(i==0)printf("i %d\n",i); if (currentTime > intersections[i].nextEvent && intersections[i].totalInOutEdges > 0) { uint edgeOT = intersections[i].edge[intersections[i].state]; uchar numLinesO = edgeOT >> 24; uint edgeONum = edgeOT & kMaskLaneMap; // 0xFFFFF // red old traffic lights for (int nL = 0; nL < numLinesO; nL++) { trafficLights[edgeONum + nL] = 0x00; //red old traffic light } for (int iN = 0; iN <= intersections[i].totalInOutEdges + 1; iN++) { //to give a round intersections[i].state = (intersections[i].state + 1) % intersections[i].totalInOutEdges;//next light if ((intersections[i].edge[intersections[i].state] & kMaskInEdge) == kMaskInEdge) { // 0x800000 uint edgeIT = intersections[i].edge[intersections[i].state]; uint edgeINum = edgeIT & kMaskLaneMap; //get edgeI 0xFFFFF uchar numLinesI = edgeIT >> 24; /// check if someone in this edge int rangeToCheck = 5.0f; //5m ushort firstPosToCheck = edgesData[edgeINum].length - intersectionClearance; //last po bool atLeastOneStopped = false; for (int posCheck = firstPosToCheck; rangeToCheck >= 0 && posCheck >= 0; posCheck--, rangeToCheck--) { //as many cells as the rangeToCheck says for (int nL = 0; nL < numLinesI; nL++) { //int cellNum = mapToReadShift + maxWidth * (edgeINum + nL) + posCheck; const uint posToSample = mapToReadShift + kMaxMapWidthM * (edgeINum + (((int) (posCheck / kMaxMapWidthM)) * numLinesI) + nL) + posCheck % kMaxMapWidthM; if (laneMap[posToSample] == 0) { //car stopped trafficLights[edgeINum + nL] = 0x0F; // STOP SIGN 0x0F--> Let pass atLeastOneStopped = true; } } } if (atLeastOneStopped == true) { intersections[i].nextEvent = currentTime + deltaEvent; //just move forward time if changed (otherwise check in next iteration) break; } } } } /// } }// */ __global__ void kernel_intersectionOneSimulation( uint numIntersections, float currentTime, LC::B18IntersectionData *intersections, uchar *trafficLights) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<numIntersections){//CUDA check (inside margins) const float deltaEvent = 20.0f; /// !!!! 
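// Fixed-cycle light logic: every deltaEvent seconds of simulated time each intersection turns the lanes of the edge that
// currently holds the green (tracked by .state) back to red (0x00), then walks its edge list to the next incoming edge
// and sets all of that edge's lanes to green (0xFF).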
if (currentTime > intersections[i].nextEvent && intersections[i].totalInOutEdges > 0) { uint edgeOT = intersections[i].edge[intersections[i].state]; uchar numLinesO = edgeOT >> 24; uint edgeONum = edgeOT & kMaskLaneMap; // 0xFFFFF; // red old traffic lights if ((edgeOT&kMaskInEdge) == kMaskInEdge) { // Just do it if we were in in for (int nL = 0; nL < numLinesO; nL++) { trafficLights[edgeONum + nL] = 0x00; //red old traffic light } } for (int iN = 0; iN <= intersections[i].totalInOutEdges + 1; iN++) { //to give a round intersections[i].state = (intersections[i].state + 1) % intersections[i].totalInOutEdges;//next light if ((intersections[i].edge[intersections[i].state] & kMaskInEdge) == kMaskInEdge) { // 0x800000 // green new traffic lights uint edgeIT = intersections[i].edge[intersections[i].state]; uint edgeINum = edgeIT & kMaskLaneMap; // 0xFFFFF; //get edgeI uchar numLinesI = edgeIT >> 24; for (int nL = 0; nL < numLinesI; nL++) { trafficLights[edgeINum + nL] = 0xFF; } //trafficLights[edgeINum]=0xFF; break; } }//green new traffic light intersections[i].nextEvent = currentTime + deltaEvent; } } }// // Kernel that executes on the CUDA device __global__ void kernel_sampleTraffic( int numPeople, LC::B18TrafficPerson *trafficPersonVec, uint *indexPathVec, int indexPathVec_d_size, float *accSpeedPerLinePerTimeInterval, uint accSpeedPerLinePerTimeInterval_d_size, float *numVehPerLinePerTimeInterval, uint numVehPerLinePerTimeInterval_d_size, uint offset) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p >= numPeople) { //CUDA check (inside margins) return; } if (trafficPersonVec[p].active == 1 && trafficPersonVec[p].indexPathCurr != END_OF_PATH) { assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); int edgeNum = indexPathVec[trafficPersonVec[p].indexPathCurr]; assert(edgeNum + offset < accSpeedPerLinePerTimeInterval_d_size); accSpeedPerLinePerTimeInterval[edgeNum + offset] += trafficPersonVec[p].v / 3.0f; assert(edgeNum + offset < numVehPerLinePerTimeInterval_d_size); numVehPerLinePerTimeInterval[edgeNum + offset]++; } } __global__ void kernel_resetPeople( int numPeople, LC::B18TrafficPerson *trafficPersonVec) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p < numPeople) {//CUDA check (inside margins) trafficPersonVec[p].active = 0; } } void b18GetSampleTrafficCUDA(std::vector<float>& accSpeedPerLinePerTimeInterval, std::vector<float>& numVehPerLinePerTimeInterval) { // copy back people size_t size = accSpeedPerLinePerTimeInterval.size() * sizeof(float); hipMemcpy(accSpeedPerLinePerTimeInterval.data(), accSpeedPerLinePerTimeInterval_d, size, hipMemcpyDeviceToHost); size_t sizeI = numVehPerLinePerTimeInterval.size() * sizeof(float); hipMemcpy(numVehPerLinePerTimeInterval.data(), numVehPerLinePerTimeInterval_d, sizeI, hipMemcpyDeviceToHost); } void b18ResetPeopleLanesCUDA(uint numPeople) { kernel_resetPeople << < ceil(numPeople / 1024.0f), 1024 >> > (numPeople, trafficPersonVec_d); hipMemset(&laneMap_d[0], -1, halfLaneMap*sizeof(unsigned char)); hipMemset(&laneMap_d[halfLaneMap], -1, halfLaneMap*sizeof(unsigned char)); } void b18SimulateTrafficCUDA(float currentTime, uint numPeople, uint numIntersections, float deltaTime, const parameters simParameters, int numBlocks, int threadsPerBlock) { intersectionBench.startMeasuring(); const uint numStepsTogether = 12; //change also in density (10 per hour) // 1.
CHANGE MAP: set map to use and clean the other if (readFirstMapC==true) { mapToReadShift=0; mapToWriteShift=halfLaneMap; gpuErrchk(hipMemset(&laneMap_d[halfLaneMap], -1, halfLaneMap*sizeof(unsigned char)));//clean second half } else { mapToReadShift=halfLaneMap; mapToWriteShift=0; gpuErrchk(hipMemset(&laneMap_d[0], -1, halfLaneMap*sizeof(unsigned char)));//clean first half } readFirstMapC=!readFirstMapC;//next iteration invert use // Simulate intersections. kernel_intersectionOneSimulation << < ceil(numIntersections / 512.0f), 512 >> > (numIntersections, currentTime, intersections_d, trafficLights_d); gpuErrchk(hipPeekAtLastError()); intersectionBench.stopMeasuring(); peopleBench.startMeasuring(); // Simulate people. kernel_trafficSimulation <<< numBlocks, threadsPerBlock>> > (numPeople, currentTime, mapToReadShift, mapToWriteShift, trafficPersonVec_d, indexPathVec_d, indexPathVec_d_size, edgesData_d, edgesData_d_size, laneMap_d, laneMap_d_size, intersections_d, trafficLights_d, trafficLights_d_size, deltaTime, simParameters); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError()); peopleBench.stopMeasuring(); }
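The two halves of laneMap_d above act as a read/write ping-pong buffer: each call to b18SimulateTrafficCUDA clears the half that is about to be written, lets the kernels read the half produced in the previous step, and then flips readFirstMapC. The short standalone sketch below is not part of the original file pair; the kernel name stepKernel, NUM_CELLS, and the toy "advance one cell" rule are illustrative assumptions. It shows the same pattern in isolation, reusing the 0xFF "empty cell" and v*3 speed conventions.

#include <cstdio>
#include "hip/hip_runtime.h"

constexpr int NUM_CELLS = 1024;        // cells per half map (assumption for the sketch)
constexpr unsigned char EMPTY = 0xFF;  // same "empty cell" marker as the simulator

// Toy step: copy every occupied cell of the read half one cell forward into the write half.
__global__ void stepKernel(const unsigned char* readHalf, unsigned char* writeHalf, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  unsigned char v = readHalf[i];
  if (v != EMPTY && i + 1 < n) writeHalf[i + 1] = v;
}

int main() {
  unsigned char* laneMap;  // one allocation holding both halves, like laneMap_d/halfLaneMap above
  hipMalloc((void **) &laneMap, 2 * NUM_CELLS);
  hipMemset(laneMap, EMPTY, 2 * NUM_CELLS);            // everything starts empty
  unsigned char car = 10 * 3;                          // a 10 m/s car encoded as v*3
  hipMemcpy(laneMap, &car, 1, hipMemcpyHostToDevice);  // place it in cell 0 of the first half

  int readShift = 0, writeShift = NUM_CELLS;           // which half is read vs written this step
  for (int step = 0; step < 4; step++) {
    hipMemset(laneMap + writeShift, EMPTY, NUM_CELLS); // clean only the half about to be written
    hipLaunchKernelGGL(stepKernel, dim3((NUM_CELLS + 255) / 256), dim3(256), 0, 0,
                       laneMap + readShift, laneMap + writeShift, NUM_CELLS);
    hipDeviceSynchronize();
    int tmp = readShift; readShift = writeShift; writeShift = tmp;  // swap roles each step
  }
  hipFree(laneMap);
  return 0;
}

Clearing only the half that is about to be written keeps the previous step's state readable while the new one is built, which is exactly what the hipMemset of halfLaneMap bytes in b18SimulateTrafficCUDA does.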
39b160dd3d2e2642d1484737e0e82196a8a48b92.cu
//CUDA CODE #include <stdio.h> #include "cuda_runtime.h" #include "curand_kernel.h" #include "device_launch_parameters.h" #include "assert.h" #include "b18TrafficPerson.h" #include "b18EdgeData.h" #include <vector> #include <iostream> #include "../../src/benchmarker.h" #include "sp/config.h" #ifndef ushort #define ushort uint16_t #endif #ifndef uint #define uint uint32_t #endif #ifndef uchar #define uchar uint8_t #endif /////////////////////////////// // CONSTANTS #define MINIMUM_NUMBER_OF_CARS_TO_MEASURE_SPEED 5 __constant__ float intersectionClearance = 7.8f; //TODO(pavan): WHAT IS THIS? #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } inline void printMemoryUsage() { // show memory usage of GPU size_t free_byte; size_t total_byte; cudaError_t cuda_status = cudaMemGetInfo(&free_byte, &total_byte); if (cudaSuccess != cuda_status) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status)); exit(1); } double free_db = (double) free_byte; double total_db = (double) total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %.0f, free = %.0f MB, total = %.0f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0); } //////////////////////////////// // VARIABLES LC::B18TrafficPerson *trafficPersonVec_d; uint *indexPathVec_d; uint indexPathVec_d_size; LC::B18EdgeData *edgesData_d; uint edgesData_d_size; uint laneMap_d_size; uint trafficLights_d_size; uint accSpeedPerLinePerTimeInterval_d_size; uint numVehPerLinePerTimeInterval_d_size; __constant__ bool calculatePollution = true; __constant__ float cellSize = 1.0f; uchar *laneMap_d; bool readFirstMapC=true; uint mapToReadShift; uint mapToWriteShift; uint halfLaneMap; float startTime; LC::B18IntersectionData *intersections_d; uchar *trafficLights_d; float* accSpeedPerLinePerTimeInterval_d; float* numVehPerLinePerTimeInterval_d; void b18InitCUDA( bool fistInitialization, std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<uint> &indexPathVec, std::vector<LC::B18EdgeData>& edgesData, std::vector<uchar>& laneMap, std::vector<uchar>& trafficLights, std::vector<LC::B18IntersectionData>& intersections, float startTimeH, float endTimeH, std::vector<float>& accSpeedPerLinePerTimeInterval, std::vector<float>& numVehPerLinePerTimeInterval, float deltaTime) { //printf(">>b18InitCUDA firstInitialization %s\n", (fistInitialization?"INIT":"ALREADY INIT")); //printMemoryUsage(); const uint numStepsPerSample = 30.0f / deltaTime; //each min const uint numStepsTogether = 12; //change also in density (10 per hour) { // people size_t size = trafficPersonVec.size() * sizeof(LC::B18TrafficPerson); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &trafficPersonVec_d, size)); // Allocate array on device gpuErrchk(cudaMemcpy(trafficPersonVec_d, trafficPersonVec.data(), size, cudaMemcpyHostToDevice)); } { // indexPathVec size_t sizeIn = indexPathVec.size() * sizeof(uint); indexPathVec_d_size = indexPathVec.size(); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &indexPathVec_d, sizeIn)); // Allocate array on device gpuErrchk(cudaMemcpy(indexPathVec_d, indexPathVec.data(), sizeIn, cudaMemcpyHostToDevice)); } {//edgeData size_t sizeD = edgesData.size() * sizeof(LC::B18EdgeData); // compute from the host vector, not from the not-yet-set edgesData_d_size edgesData_d_size = edgesData.size(); if (fistInitialization)
gpuErrchk(cudaMalloc((void **) &edgesData_d, sizeD)); // Allocate array on device gpuErrchk(cudaMemcpy(edgesData_d, edgesData.data(), sizeD, cudaMemcpyHostToDevice)); } {//laneMap size_t sizeL = laneMap.size() * sizeof(uchar); laneMap_d_size = laneMap.size(); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &laneMap_d, sizeL)); // Allocate array on device gpuErrchk(cudaMemcpy(laneMap_d, laneMap.data(), sizeL, cudaMemcpyHostToDevice)); halfLaneMap = laneMap.size() / 2; } {// intersections size_t sizeI = intersections.size() * sizeof(LC::B18IntersectionData); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &intersections_d, sizeI)); // Allocate array on device gpuErrchk(cudaMemcpy(intersections_d, intersections.data(), sizeI, cudaMemcpyHostToDevice)); size_t sizeT = trafficLights.size() * sizeof(uchar);//total number of lanes trafficLights_d_size = trafficLights.size(); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &trafficLights_d, sizeT)); // Allocate array on device gpuErrchk(cudaMemcpy(trafficLights_d, trafficLights.data(), sizeT, cudaMemcpyHostToDevice)); } { startTime = startTimeH * 3600.0f; uint numSamples = ceil(((endTimeH*3600.0f - startTimeH*3600.0f) / (deltaTime * numStepsPerSample * numStepsTogether))) + 1; //!!! accSpeedPerLinePerTimeInterval.clear(); numVehPerLinePerTimeInterval.clear(); accSpeedPerLinePerTimeInterval.resize(numSamples * trafficLights.size()); numVehPerLinePerTimeInterval.resize(numSamples * trafficLights.size()); size_t sizeAcc = accSpeedPerLinePerTimeInterval.size() * sizeof(float); if (fistInitialization) gpuErrchk(cudaMalloc((void **) &accSpeedPerLinePerTimeInterval_d, sizeAcc)); // Allocate array on device if (fistInitialization) gpuErrchk(cudaMalloc((void **) &numVehPerLinePerTimeInterval_d, sizeAcc)); // Allocate array on device gpuErrchk(cudaMemset(&accSpeedPerLinePerTimeInterval_d[0], 0, sizeAcc)); gpuErrchk(cudaMemset(&numVehPerLinePerTimeInterval_d[0], 0, sizeAcc)); accSpeedPerLinePerTimeInterval_d_size = sizeAcc; numVehPerLinePerTimeInterval_d_size = sizeAcc; } printMemoryUsage(); } void b18updateStructuresCUDA( std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<uint> &indexPathVec, std::vector<LC::B18EdgeData>& edgesData) { std::cout<< ">> b18updateStructuresCUDA" << std::endl; //indexPathVec cudaFree(indexPathVec_d); size_t sizeIn = indexPathVec.size() * sizeof(uint); indexPathVec_d_size = indexPathVec.size(); gpuErrchk(cudaMalloc((void **) &indexPathVec_d, sizeIn)); gpuErrchk(cudaMemcpy(indexPathVec_d, indexPathVec.data(), sizeIn, cudaMemcpyHostToDevice)); cudaFree(edgesData_d); size_t sizeD = edgesData.size() * sizeof(LC::B18EdgeData); gpuErrchk(cudaMalloc((void **) &edgesData_d, sizeD)); gpuErrchk(cudaMemcpy(edgesData_d, edgesData.data(), sizeD, cudaMemcpyHostToDevice)); cudaFree(trafficPersonVec_d); size_t size = trafficPersonVec.size() * sizeof(LC::B18TrafficPerson); gpuErrchk(cudaMalloc((void **) &trafficPersonVec_d, size)); gpuErrchk(cudaMemcpy(trafficPersonVec_d, trafficPersonVec.data(), size, cudaMemcpyHostToDevice)); printMemoryUsage(); } void b18FinishCUDA(void){ cudaFree(trafficPersonVec_d); cudaFree(indexPathVec_d); cudaFree(edgesData_d); cudaFree(laneMap_d); cudaFree(intersections_d); cudaFree(trafficLights_d); cudaFree(accSpeedPerLinePerTimeInterval_d); cudaFree(numVehPerLinePerTimeInterval_d); } void b18GetDataCUDA(std::vector<LC::B18TrafficPerson>& trafficPersonVec, std::vector<LC::B18EdgeData> &edgesData){ // copy back people size_t size = trafficPersonVec.size() * sizeof(LC::B18TrafficPerson); 
size_t size_edges = edgesData_d_size * sizeof(LC::B18EdgeData); cudaMemcpy(trafficPersonVec.data(),trafficPersonVec_d,size,cudaMemcpyDeviceToHost);//cudaMemcpyHostToDevice cudaMemcpy(edgesData.data(),edgesData_d,size_edges,cudaMemcpyDeviceToHost);//cudaMemcpyHostToDevice } __device__ void calculateGapsLC( uint mapToReadShift, uchar* laneMap, uchar trafficLightState, uint laneToCheck, ushort numLinesEdge, float posInMToCheck, float length, uchar &v_a, uchar &v_b, float &gap_a, float &gap_b, uint laneMap_d_size) { ushort numOfCells = ceil(length); ushort initShift = ceil(posInMToCheck); uchar laneChar; bool found = false; // CHECK FORWARD //printf("initShift %u numOfCells %u\n",initShift,numOfCells); for (ushort b = initShift - 1; (b < numOfCells) && (!found); b++) { //NOTE -1 to make sure there is none in at the same level const uint posToSample = mapToReadShift + kMaxMapWidthM * (laneToCheck + (((int) (b / kMaxMapWidthM)) * numLinesEdge)) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { gap_a = ((float) b - initShift); //m v_a = laneChar; //laneChar is in 3*ms (to save space in array) found = true; break; } } if (!found) { if (trafficLightState == 0x00) { //red //found=true; gap_a = gap_b = 1000.0f; //force to change to the line without vehicle v_a = v_b = 0xFF; return; } } if (!found) { gap_a = 1000.0f; } // CHECK BACKWARDS found = false; //printf("2initShift %u numOfCells %u\n",initShift,numOfCells); for (int b = initShift + 1; (b >= 0) && (!found); b--) { // NOTE +1 to make sure there is none in at the same level //laneChar = laneMap[mapToReadShift + maxWidth * (laneToCheck) + b]; const uint posToSample = mapToReadShift + kMaxMapWidthM * (laneToCheck + (((int) (b / kMaxMapWidthM)) * numLinesEdge)) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { gap_b = ((float) initShift - b); //m v_b = laneChar; //laneChar is in 3*ms (to save space in array) found = true; break; } } //printf("3initShift %u numOfCells %u\n",initShift,numOfCells); if (!found) { gap_b = 1000.0f; } }// __device__ void calculateLaneCarShouldBe( uint curEdgeLane, uint nextEdge, LC::B18IntersectionData* intersections, uint edgeNextInters, ushort edgeNumLanes, ushort &initOKLanes, ushort &endOKLanes) { initOKLanes = 0; endOKLanes = edgeNumLanes; bool currentEdgeFound = false; bool exitFound = false; ushort numExitToTake = 0; ushort numExists = 0; for (int eN = intersections[edgeNextInters].totalInOutEdges - 1; eN >= 0; eN--) { // clockwise uint procEdge = intersections[edgeNextInters].edge[eN]; if ((procEdge & kMaskLaneMap) == curEdgeLane) { //current edge 0xFFFFF currentEdgeFound = true; if (!exitFound) { numExitToTake = 0; } continue; } if ((procEdge & kMaskInEdge) == 0x0) { //out edge 0x800000 numExists++; if (currentEdgeFound) { numExitToTake++; } if (!currentEdgeFound && !exitFound) { numExitToTake++; } } if ((procEdge & kMaskInEdge) == nextEdge) { exitFound = true; currentEdgeFound = false; } } if (edgeNumLanes == 0) { printf("ERRRROR\n"); } switch (edgeNumLanes) { /// ONE LANE case 1: initOKLanes = 0; endOKLanes = 1; break; /// TWO LANE case 2: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 2; break; case 3: if (numExitToTake > 2) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 2; break; default: if (numExitToTake >= numExists - 1) { initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 2; break; } break; 
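// The remaining cases follow the same heuristic: with numExists exits at the next intersection and the desired exit
// numExitToTake positions away, an early exit (right turn) is served from the rightmost lane(s), a late exit (left turn)
// from the leftmost lane(s), and anything in between from the middle lanes.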
/// THREE LANE case 3: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 3; break; case 3: if (numExitToTake > 2) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 3; break; default: if (numExitToTake >= numExists - 1) { initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 2; break; } break; case 4: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = 4; break; case 3: if (numExitToTake == 1) { //right initOKLanes = 3; endOKLanes = 4; } if (numExitToTake > 3) { //left initOKLanes = 0; endOKLanes = 1; break; } initOKLanes = 1; endOKLanes = 4; break; default: if (numExitToTake == 1) { //right initOKLanes = edgeNumLanes - 1; endOKLanes = edgeNumLanes; } if (numExitToTake >= numExists - 2) { initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; //also lane 2 endOKLanes = edgeNumLanes; } break; default: switch (numExists) { case 1: case 2://all okay initOKLanes = 0; endOKLanes = edgeNumLanes; break; case 3: if (numExitToTake == 1) { //right initOKLanes = edgeNumLanes - 1; endOKLanes = edgeNumLanes; } if (numExitToTake > edgeNumLanes - 2) { //left initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; endOKLanes = edgeNumLanes; break; default: if (numExitToTake < 2) { //right initOKLanes = edgeNumLanes - 2; endOKLanes = edgeNumLanes; } if (numExitToTake >= numExists - 2) { initOKLanes = 0; endOKLanes = 2; break; } initOKLanes = 1; //also lane 2 endOKLanes = edgeNumLanes - 1; } break; } } __device__ float meters_per_second_to_miles_per_hour(float meters_per_second) { return meters_per_second * 2.2369362920544; } __device__ const float calculateCOStep(float personVelocity) { // Formula comes from the paper "Designing Large-Scale Interactive Traffic Animations for Urban Modeling" // Section 4.4 Traffic Indicators const float personVelocityMPH = meters_per_second_to_miles_per_hour(personVelocity); return -0.064 + 0.0056 * personVelocityMPH + 0.00026 * (personVelocityMPH - 50.0f) * (personVelocityMPH - 50.0f); } __device__ const float calculateGasConsumption(const float a, const float v) { // Formula comes from the paper "Architecture for Modular Microsimulation of Real Estate Markets and Transportation" // Section 6.3.4 Vehicle energy consumption and pollution estimates formula (9) const float Pea = a > 0.0f ? (0.472f*1.680f*a*a*v) : 0.0f; return 0.666f + 0.072f*(0.269f*v + 0.000672f*(v*v*v) + 0.0171f*(v*v) + 1.680f*a*v + Pea); } // Kernel that executes on the CUDA device __global__ void kernel_trafficSimulation( int numPeople, float currentTime, uint mapToReadShift, uint mapToWriteShift, LC::B18TrafficPerson *trafficPersonVec, uint *indexPathVec, int indexPathVec_d_size, LC::B18EdgeData* edgesData, int edgesData_d_size, uchar *laneMap, int laneMap_d_size, LC::B18IntersectionData *intersections, uchar *trafficLights, uint trafficLights_d_size, float deltaTime, const parameters simParameters) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p >= numPeople) return; //CUDA check (inside margins) if (trafficPersonVec[p].active == 2) return; // trip finished if (trafficPersonVec[p].time_departure > currentTime) return; //1.1 just continue waiting assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); if (indexPathVec[trafficPersonVec[p].indexPathCurr] == END_OF_PATH) { trafficPersonVec[p].active = 2; //finished return; } //2.1. 
check if person should still wait or should start if (trafficPersonVec[p].active == 0) { //1.2 find first edge assert(trafficPersonVec[p].indexPathInit != INIT_EDGE_INDEX_NOT_SET); trafficPersonVec[p].indexPathCurr = trafficPersonVec[p].indexPathInit; // reset index. int indexFirstEdge = trafficPersonVec[p].indexPathCurr; assert(indexFirstEdge < indexPathVec_d_size); uint firstEdge = indexPathVec[indexFirstEdge]; trafficPersonVec[p].last_time_simulated = currentTime; if (firstEdge == END_OF_PATH) { trafficPersonVec[p].active = 2; return; } if (firstEdge >= edgesData_d_size) { printf("firstEdge %d is bigger than edgesData size %d\n", firstEdge, edgesData_d_size); } assert(firstEdge < edgesData_d_size); //1.4 try to place it in middle of edge ushort numOfCells = ceil(edgesData[firstEdge].length); ushort initShift = (ushort) (0.5f * numOfCells); //number of cells it should be placed (half of road) uchar laneChar; bool placed = false; ushort numCellsEmptyToBePlaced = simParameters.s_0; ushort countEmptyCells = 0; for (ushort b = initShift; (b < numOfCells) && (!placed); b++) { ushort lN = edgesData[firstEdge].numLines - 1; //just right lane int laneMapPosition = mapToReadShift + kMaxMapWidthM * (firstEdge + lN) + b; assert(laneMapPosition < laneMap_d_size); laneChar = laneMap[laneMapPosition]; //get byte of edge (proper line) if (laneChar != 0xFF) { countEmptyCells = 0; continue; } countEmptyCells++;// ensure there is enough room to place the car if (countEmptyCells < numCellsEmptyToBePlaced) { continue; } trafficPersonVec[p].numOfLaneInEdge = lN; trafficPersonVec[p].posInLaneM = b; //m uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s *3 (to keep more precision int laneMapPosition2 = mapToWriteShift + kMaxMapWidthM * (firstEdge + lN) + b; assert(laneMapPosition2 < laneMap_d_size); laneMap[laneMapPosition2] = vInMpS; placed = true; break; } if (!placed) { //not posible to start now return; } trafficPersonVec[p].v = 0; trafficPersonVec[p].LC_stateofLaneChanging = 0; //1.5 active car trafficPersonVec[p].active = 1; trafficPersonVec[p].isInIntersection = 0; trafficPersonVec[p].num_steps = 1; trafficPersonVec[p].co = 0.0f; trafficPersonVec[p].gas = 0.0f; assert(trafficPersonVec[p].indexPathCurr + 1 < indexPathVec_d_size); if (indexPathVec[trafficPersonVec[p].indexPathCurr + 1] != END_OF_PATH) { trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } trafficPersonVec[p].path_length_gpu = 0; trafficPersonVec[p].prevEdge = firstEdge; return; } // set up next edge info int indexCurrentEdge = trafficPersonVec[p].indexPathCurr; assert(indexCurrentEdge < indexPathVec_d_size); uint currentEdge = indexPathVec[indexCurrentEdge]; assert(currentEdge < edgesData_d_size); int indexNextEdge = trafficPersonVec[p].indexPathCurr + 1; assert(indexNextEdge < indexPathVec_d_size); uint nextEdge = indexPathVec[indexNextEdge]; assert(nextEdge < edgesData_d_size || nextEdge == END_OF_PATH); if (nextEdge != END_OF_PATH) { trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } //2. 
it is moving trafficPersonVec[p].num_steps++; trafficPersonVec[p].last_time_simulated = fmaxf(currentTime, trafficPersonVec[p].last_time_simulated); //2.1 try to move float numMToMove; bool nextVehicleIsATrafficLight = false; //when we're on a new edge for the first time if (currentEdge == trafficPersonVec[p].nextEdge) { trafficPersonVec[p].end_time_on_prev_edge = currentTime - deltaTime; float elapsed_s = (trafficPersonVec[p].end_time_on_prev_edge - trafficPersonVec[p].start_time_on_prev_edge); //multiply by delta_time to get seconds elapsed (not half seconds) // We filter whenever elapsed_s == 0, which means the time granularity was not enough to measure the speed // We also filter whenever 0 > elapsed_s > 5, because it causes manual_v to turn extraordinarily high assert(trafficPersonVec[p].prevEdge < edgesData_d_size); if (elapsed_s > MINIMUM_NUMBER_OF_CARS_TO_MEASURE_SPEED) { trafficPersonVec[p].manual_v = edgesData[trafficPersonVec[p].prevEdge].length / elapsed_s; edgesData[trafficPersonVec[p].prevEdge].curr_iter_num_cars += 1; edgesData[trafficPersonVec[p].prevEdge].curr_cum_vel += trafficPersonVec[p].manual_v; } trafficPersonVec[p].start_time_on_prev_edge = currentTime; trafficPersonVec[p].prevEdge = currentEdge; } trafficPersonVec[p].nextEdge = nextEdge; // www.vwi.tu-dresden.de/~treiber/MicroApplet/IDM.html // IDM float thirdTerm = 0; // 2.1.1 Find front car int numCellsCheck = max(30.0f, trafficPersonVec[p].v * deltaTime * 2); //30 or double of the speed*time // a) SAME LINE (BEFORE SIGNALING) bool found = false; bool noFirstInLaneBeforeSign = false; //use for stop control (just let 1st to pass) TODO(pavan): I DON'T GET THIS bool noFirstInLaneAfterSign = false; //use for stop control (just let 1st to pass) float s; float delta_v; uchar laneChar; ushort byteInLine = (ushort) floor(trafficPersonVec[p].posInLaneM); ushort numOfCells = ceil((edgesData[currentEdge].length - intersectionClearance)); //intersectionClearance hardcoded to 7.8f - why? for (ushort b = byteInLine + 2; (b < numOfCells) && (!found) && (numCellsCheck > 0); b++, numCellsCheck--) { // ShiftRead + WIDTH * (width number * # lanes + # laneInEdge) + b TODO(pavan): WHAT IS THIS? //TODO(pavan): double check what mapToReadShift is printing out assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); const uint posToSample = mapToReadShift + kMaxMapWidthM *(currentEdge + (((int) (byteInLine / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + b % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; //TODO(pavan): Is this clause for when it is not at the intersection yet but it has found a car in front of it? 
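// Reader's note on the lane-map encoding used by the scans below: each byte is one ~1 m cell of a lane (cellSize = 1.0f);
// 0xFF marks an empty cell, and any other value is the occupying car's speed stored as (uchar)(v * 3), hence the laneChar / 3.0f decode.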
if (laneChar != 0xFF) { s = ((float) (b - byteInLine)); //m delta_v = trafficPersonVec[p].v - (laneChar / 3.0f); //laneChar is in 3*ms (to save space in array) found = true; noFirstInLaneBeforeSign = true; break; } } // NEXT LINE // e) MOVING ALONG IN THE NEXT EDGE if (!found && numCellsCheck > 0) { //check if in next line if ((nextEdge != END_OF_PATH) && (edgesData[currentEdge].nextIntersMapped != trafficPersonVec[p].end_intersection)) { // we haven't arrived to destination (check next line) ushort nextEdgeLaneToBe = trafficPersonVec[p].numOfLaneInEdge; //same lane //printf("trafficPersonVec[p].numOfLaneInEdge %u\n",trafficPersonVec[p].numOfLaneInEdge); assert(nextEdge < edgesData_d_size); if (nextEdgeLaneToBe >= edgesData[nextEdge].numLines) { nextEdgeLaneToBe = edgesData[nextEdge].numLines - 1; //change line if there are less roads } //printf("2trafficPersonVec[p].numOfLaneInEdge %u\n",trafficPersonVec[p].numOfLaneInEdge); ushort numOfCells = ceil(edgesData[nextEdge].length); for (ushort b = 0; (b < numOfCells) && (!found) && (numCellsCheck > 0); b++, numCellsCheck--) { const uint posToSample = mapToReadShift + kMaxMapWidthM * (nextEdge + nextEdgeLaneToBe) + b; // b18 not changed since we check first width assert(posToSample < laneMap_d_size); laneChar = laneMap[posToSample]; if (laneChar != 0xFF) { s = ((float) (b)); //m delta_v = trafficPersonVec[p].v - (laneChar / 3.0f); // laneChar is in 3*ms (to save space in array) found = true; break; } } } } float s_star; if (found && delta_v > 0) { //car in front and slower than us // 2.1.2 calculate dv_dt // The following operation is taken from Designing Large-Scale Interactive Traffic Animations for Urban Modeling // Section 4.3.1. Car-Following Model formula (2) s_star = simParameters.s_0 + max(0.0f, (trafficPersonVec[p].v * trafficPersonVec[p].T + (trafficPersonVec[p].v * delta_v) / (2 * sqrtf(trafficPersonVec[p].a * trafficPersonVec[p].b)))); thirdTerm = powf(((s_star) / (s)), 2); } // The following operation is taken from Designing Large-Scale Interactive Traffic Animations for Urban Modeling // Section 4.3.1. 
Car-Following Model formula (1) // And also Architecture for Modular Microsimulation of Real Estate Markets and Transportation // Section 6.3.2 Per-vehicle and traffic control simulation formula (7) float dv_dt = trafficPersonVec[p].a * (1.0f - std::pow(( trafficPersonVec[p].v / edgesData[currentEdge].maxSpeedMperSec), 4) - thirdTerm); // 2.1.3 update values numMToMove = max(0.0f, trafficPersonVec[p].v * deltaTime + 0.5f * (dv_dt) * deltaTime * deltaTime); trafficPersonVec[p].v += dv_dt * deltaTime; if (trafficPersonVec[p].v < 0) { trafficPersonVec[p].v = 0; dv_dt = 0.0f; } trafficPersonVec[p].cum_v += trafficPersonVec[p].v; if (calculatePollution && ((float(currentTime) == int(currentTime)))) { // enabled and each second (assuming deltaTime 0.5f) const float coStep = calculateCOStep(trafficPersonVec[p].v); if (coStep > 0) { trafficPersonVec[p].co += coStep; } trafficPersonVec[p].gas += calculateGasConsumption(dv_dt, trafficPersonVec[p].v); } if (trafficPersonVec[p].v == 0) { //if not moving, do not do anything else ushort posInLineCells = (ushort) (trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (currentEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneMap[posToSample] = 0; return; } // COLOR trafficPersonVec[p].color = p << 8; // STOP (check if it is a stop and whether it can go through) trafficPersonVec[p].posInLaneM = trafficPersonVec[p].posInLaneM + numMToMove; //2.2 close to intersection //2.2 check if change intersection if (trafficPersonVec[p].posInLaneM > edgesData[currentEdge].length) { //reach intersection numMToMove = trafficPersonVec[p].posInLaneM - edgesData[currentEdge].length; trafficPersonVec[p].posInLaneM = numMToMove; trafficPersonVec[p].dist_traveled += edgesData[currentEdge].length; trafficPersonVec[p].path_length_gpu++; //2.2.1 find next edge assert(indexCurrentEdge < indexPathVec_d_size); assert(currentEdge < edgesData_d_size); trafficPersonVec[p].LC_stateofLaneChanging = 0; //2.1 check if end if (nextEdge != END_OF_PATH) { assert(nextEdge < edgesData_d_size); if (trafficPersonVec[p].numOfLaneInEdge >= edgesData[nextEdge].numLines) { trafficPersonVec[p].numOfLaneInEdge = edgesData[nextEdge].numLines - 1; //change line if there are less roads } //TODO: Test if the following line is doing the conversion wrong uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s to fit in uchar ushort posInLineCells = (ushort) (trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (nextEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[nextEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; // note the last % should not happen assert(posToSample < laneMap_d_size); laneMap[posToSample] = vInMpS; trafficPersonVec[p].LC_initOKLanes = 0xFF; trafficPersonVec[p].LC_endOKLanes = 0xFF; } else { trafficPersonVec[p].active = 2; // end of path: mark person as finished } trafficPersonVec[p].indexPathCurr++; trafficPersonVec[p].LC_stateofLaneChanging = 0; } else { //does not reach an intersection assert(indexCurrentEdge < indexPathVec_d_size); assert(indexNextEdge < indexPathVec_d_size); assert(currentEdge < edgesData_d_size); assert(nextEdge < edgesData_d_size || nextEdge == END_OF_PATH); // LANE CHANGING (happens when we have not reached the intersection) if (trafficPersonVec[p].v > 3.0f && trafficPersonVec[p].num_steps % 5 == 0) { //at least 10km/h to try to
change lane //just check every (5 steps) 5 seconds // next thing is not a traffic light // skip if there is one lane (avoid to do this) // skip if it is the last edge if (!nextVehicleIsATrafficLight && edgesData[currentEdge].numLines > 1 && nextEdge != END_OF_PATH) { //////////////////////////////////////////////////// // LC 1 update lane changing status if (trafficPersonVec[p].LC_stateofLaneChanging == 0) { // 2.2-exp((x-1)^2) float x = trafficPersonVec[p].posInLaneM / edgesData[currentEdge].length; if (x > 0.4f) { //just after 40% of the road float probabiltyMandatoryState = 2.2 - exp((x - 1) * (x - 1)); //if (((float) qrand() / RAND_MAX) < probabiltyMandatoryState) { if ((((int) (x * 100) % 100) / 100.0f) < probabiltyMandatoryState) { // pseudo random number trafficPersonVec[p].LC_stateofLaneChanging = 1; } } } // LC 2 NOT MANDATORY STATE if (trafficPersonVec[p].LC_stateofLaneChanging == 0) { // discretionary change: v slower than the current road limit and deccelerating and moving if ((trafficPersonVec[p].v < (edgesData[currentEdge].maxSpeedMperSec * 0.7f)) && (dv_dt < 0) && trafficPersonVec[p].v > 3.0f) { bool leftLane = trafficPersonVec[p].numOfLaneInEdge > 0; //at least one lane on the left bool rightLane = trafficPersonVec[p].numOfLaneInEdge < edgesData[currentEdge].numLines - 1; //at least one lane if (leftLane && rightLane) { if (int(trafficPersonVec[p].v) % 2 == 0) { // pseudo random leftLane = false; } else { rightLane = false; } } ushort laneToCheck; if (leftLane) { laneToCheck = trafficPersonVec[p].numOfLaneInEdge - 1; } else { laneToCheck = trafficPersonVec[p].numOfLaneInEdge + 1; } uchar v_a, v_b; float gap_a, gap_b; assert(currentEdge + trafficPersonVec[p].numOfLaneInEdge < trafficLights_d_size); uchar trafficLightState = trafficLights[currentEdge + trafficPersonVec[p].numOfLaneInEdge]; calculateGapsLC(mapToReadShift, laneMap, trafficLightState, currentEdge + laneToCheck, edgesData[currentEdge].numLines, trafficPersonVec[p].posInLaneM, edgesData[currentEdge].length, v_a, v_b, gap_a, gap_b, laneMap_d_size); if (gap_a == 1000.0f && gap_b == 1000.0f) { //lag and lead car very far trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } else { // NOT ALONE float b1A = 0.05f, b2A = 0.15f; float b1B = 0.15f, b2B = 0.40f; // simParameters.s_0-> critical lead gap float g_na_D, g_bn_D; bool acceptLC = true; if (gap_a != 1000.0f) { g_na_D = max(simParameters.s_0, simParameters.s_0 + b1A * trafficPersonVec[p].v + b2A * (trafficPersonVec[p].v - v_a * 3.0f)); if (gap_a < g_na_D) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC && gap_b != 1000.0f) { g_bn_D = max(simParameters.s_0, simParameters.s_0 + b1B * v_b * 3.0f + b2B * (v_b * 3.0f - trafficPersonVec[p].v)); if (gap_b < g_bn_D) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC) { trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } } } }// Discretionary // LC 3 *MANDATORY* STATE if (trafficPersonVec[p].LC_stateofLaneChanging == 1) { // LC 3.1 Calculate the correct lanes if (trafficPersonVec[p].LC_endOKLanes == 0xFF) { calculateLaneCarShouldBe(currentEdge, nextEdge, intersections, edgesData[currentEdge].nextIntersMapped, edgesData[currentEdge].numLines, trafficPersonVec[p].LC_initOKLanes, trafficPersonVec[p].LC_endOKLanes); if (trafficPersonVec[p].LC_initOKLanes == 0 && trafficPersonVec[p].LC_endOKLanes == 0) { } } bool leftLane = false, rightLane = false; // LC 3.2 CORRECT LANES--> DICRETIONARY LC WITHIN if (trafficPersonVec[p].numOfLaneInEdge >= 
trafficPersonVec[p].LC_initOKLanes && trafficPersonVec[p].numOfLaneInEdge < trafficPersonVec[p].LC_endOKLanes) { // for discretionary it should be under some circustances if ((trafficPersonVec[p].v < (edgesData[currentEdge].maxSpeedMperSec * 0.7f)) && (dv_dt < 0) && trafficPersonVec[p].v > 3.0f) { leftLane = (trafficPersonVec[p].numOfLaneInEdge > 0) && //at least one lane on the left (trafficPersonVec[p].numOfLaneInEdge - 1 >= trafficPersonVec[p].LC_initOKLanes) && (trafficPersonVec[p].numOfLaneInEdge - 1 < trafficPersonVec[p].LC_endOKLanes); rightLane = (trafficPersonVec[p].numOfLaneInEdge < edgesData[currentEdge].numLines - 1) && //at least one lane (trafficPersonVec[p].numOfLaneInEdge + 1 >= trafficPersonVec[p].LC_initOKLanes) && (trafficPersonVec[p].numOfLaneInEdge + 1 < trafficPersonVec[p].LC_endOKLanes); } } else { // LC 3.3 INCORRECT LANES--> MANDATORY LC if (trafficPersonVec[p].numOfLaneInEdge < trafficPersonVec[p].LC_initOKLanes) { rightLane = true; } else { leftLane = true; } if (rightLane && trafficPersonVec[p].numOfLaneInEdge + 1 >= edgesData[currentEdge].numLines) { printf("ERROR: RT laneToCheck>=edgeNumLanes\n"); } if (leftLane && trafficPersonVec[p].numOfLaneInEdge == 0) { printf("ERROR %u: LT laneToCheck>=edgeNumLanes OK %u-%u NE %u\n", p, trafficPersonVec[p].LC_initOKLanes, trafficPersonVec[p].LC_endOKLanes, currentEdge); } } if (leftLane || rightLane) { // choose lane (if necessary) if (leftLane && rightLane) { if ((int) (trafficPersonVec[p].posInLaneM) % 2 == 0) { //pseudo random leftLane = false; } else { rightLane = false; } } ushort laneToCheck; if (leftLane) { laneToCheck = trafficPersonVec[p].numOfLaneInEdge - 1; } else { laneToCheck = trafficPersonVec[p].numOfLaneInEdge + 1; } if (laneToCheck >= edgesData[currentEdge].numLines) { printf("ERROR: laneToCheck>=edgesData[currentEdge].numLines %u %u\n", laneToCheck, edgesData[currentEdge].numLines); } uchar v_a, v_b; float gap_a, gap_b; assert(currentEdge + trafficPersonVec[p].numOfLaneInEdge < trafficLights_d_size); uchar trafficLightState = trafficLights[currentEdge + trafficPersonVec[p].numOfLaneInEdge]; calculateGapsLC(mapToReadShift, laneMap, trafficLightState, currentEdge + laneToCheck, edgesData[currentEdge].numLines, trafficPersonVec[p].posInLaneM, edgesData[currentEdge].length, v_a, v_b, gap_a, gap_b, laneMap_d_size); if (gap_a == 1000.0f && gap_b == 1000.0f) { //lag and lead car very far trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } else { // NOT ALONE float b1A = 0.05f, b2A = 0.15f; float b1B = 0.15f, b2B = 0.40f; float gamma = 0.000025; // simParameters.s_0-> critical lead gap float distEnd = edgesData[currentEdge].length - trafficPersonVec[p].posInLaneM; float expTerm = (1 - exp(-gamma * distEnd * distEnd)); float g_na_M, g_bn_M; bool acceptLC = true; if (gap_a != 1000.0f) { g_na_M = max(simParameters.s_0, simParameters.s_0 + (b1A * trafficPersonVec[p].v + b2A * (trafficPersonVec[p].v - v_a * 3.0f))); if (gap_a < g_na_M) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC && gap_b != 1000.0f) { g_bn_M = max(simParameters.s_0, simParameters.s_0 + (b1B * v_b * 3.0f + b2B * (v_b * 3.0f - trafficPersonVec[p].v))); if (gap_b < g_bn_M) { //gap smaller than critical gap acceptLC = false; } } if (acceptLC) { trafficPersonVec[p].numOfLaneInEdge = laneToCheck; // CHANGE LINE } } } }// Mandatory }//at least two lanes and not stopped by traffic light } uchar vInMpS = (uchar) (trafficPersonVec[p].v * 3); //speed in m/s to fit in uchar ushort posInLineCells = (ushort) 
(trafficPersonVec[p].posInLaneM); const uint posToSample = mapToWriteShift + kMaxMapWidthM * (currentEdge + (((int) (posInLineCells / kMaxMapWidthM)) * edgesData[currentEdge].numLines) + trafficPersonVec[p].numOfLaneInEdge) + posInLineCells % kMaxMapWidthM; assert(posToSample < laneMap_d_size); laneMap[posToSample] = vInMpS; } } /* __global__ void kernel_intersectionSTOPSimulation( uint numIntersections, float currentTime, LC::B18IntersectionData *intersections, uchar *trafficLights, LC::B18EdgeData* edgesData,//for the length uchar* laneMap,//to check if there are cars uint mapToReadShift) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<numIntersections) {//CUDA check (inside margins) const float deltaEvent = 0.0f; //if(i==0)printf("i %d\n",i); if (currentTime > intersections[i].nextEvent && intersections[i].totalInOutEdges > 0) { uint edgeOT = intersections[i].edge[intersections[i].state]; uchar numLinesO = edgeOT >> 24; uint edgeONum = edgeOT & kMaskLaneMap; // 0xFFFFF // red old traffic lights for (int nL = 0; nL < numLinesO; nL++) { trafficLights[edgeONum + nL] = 0x00; //red old traffic light } for (int iN = 0; iN <= intersections[i].totalInOutEdges + 1; iN++) { //to give a round intersections[i].state = (intersections[i].state + 1) % intersections[i].totalInOutEdges;//next light if ((intersections[i].edge[intersections[i].state] & kMaskInEdge) == kMaskInEdge) { // 0x800000 uint edgeIT = intersections[i].edge[intersections[i].state]; uint edgeINum = edgeIT & kMaskLaneMap; //get edgeI 0xFFFFF uchar numLinesI = edgeIT >> 24; /// check if someone in this edge int rangeToCheck = 5.0f; //5m ushort firstPosToCheck = edgesData[edgeINum].length - intersectionClearance; //last po bool atLeastOneStopped = false; for (int posCheck = firstPosToCheck; rangeToCheck >= 0 && posCheck >= 0; posCheck--, rangeToCheck--) { //as many cells as the rangeToCheck says for (int nL = 0; nL < numLinesI; nL++) { //int cellNum = mapToReadShift + maxWidth * (edgeINum + nL) + posCheck; const uint posToSample = mapToReadShift + kMaxMapWidthM * (edgeINum + (((int) (posCheck / kMaxMapWidthM)) * numLinesI) + nL) + posCheck % kMaxMapWidthM; if (laneMap[posToSample] == 0) { //car stopped trafficLights[edgeINum + nL] = 0x0F; // STOP SIGN 0x0F--> Let pass atLeastOneStopped = true; } } } if (atLeastOneStopped == true) { intersections[i].nextEvent = currentTime + deltaEvent; //just move forward time if changed (otherwise check in next iteration) break; } } } } /// } }// */ __global__ void kernel_intersectionOneSimulation( uint numIntersections, float currentTime, LC::B18IntersectionData *intersections, uchar *trafficLights) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<numIntersections){//CUDA check (inside margins) const float deltaEvent = 20.0f; /// !!!! 
if (currentTime > intersections[i].nextEvent && intersections[i].totalInOutEdges > 0) { uint edgeOT = intersections[i].edge[intersections[i].state]; uchar numLinesO = edgeOT >> 24; uint edgeONum = edgeOT & kMaskLaneMap; // 0xFFFFF; // red old traffic lights if ((edgeOT&kMaskInEdge) == kMaskInEdge) { // Just do it if we were on an in-edge for (int nL = 0; nL < numLinesO; nL++) { trafficLights[edgeONum + nL] = 0x00; //red old traffic light } } for (int iN = 0; iN <= intersections[i].totalInOutEdges + 1; iN++) { //to give a round intersections[i].state = (intersections[i].state + 1) % intersections[i].totalInOutEdges;//next light if ((intersections[i].edge[intersections[i].state] & kMaskInEdge) == kMaskInEdge) { // 0x800000 // green new traffic lights uint edgeIT = intersections[i].edge[intersections[i].state]; uint edgeINum = edgeIT & kMaskLaneMap; // 0xFFFFF; //get edgeI uchar numLinesI = edgeIT >> 24; for (int nL = 0; nL < numLinesI; nL++) { trafficLights[edgeINum + nL] = 0xFF; } //trafficLights[edgeINum]=0xFF; break; } }//green new traffic light intersections[i].nextEvent = currentTime + deltaEvent; } } }// // Kernel that executes on the CUDA device __global__ void kernel_sampleTraffic( int numPeople, LC::B18TrafficPerson *trafficPersonVec, uint *indexPathVec, int indexPathVec_d_size, float *accSpeedPerLinePerTimeInterval, uint accSpeedPerLinePerTimeInterval_d_size, float *numVehPerLinePerTimeInterval, uint numVehPerLinePerTimeInterval_d_size, uint offset) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p >= numPeople) { //CUDA check (inside margins) return; } if (trafficPersonVec[p].active == 1 && trafficPersonVec[p].indexPathCurr != END_OF_PATH) { assert(trafficPersonVec[p].indexPathCurr < indexPathVec_d_size); int edgeNum = indexPathVec[trafficPersonVec[p].indexPathCurr]; assert(edgeNum + offset < accSpeedPerLinePerTimeInterval_d_size); accSpeedPerLinePerTimeInterval[edgeNum + offset] += trafficPersonVec[p].v / 3.0f; assert(edgeNum + offset < numVehPerLinePerTimeInterval_d_size); numVehPerLinePerTimeInterval[edgeNum + offset]++; } } __global__ void kernel_resetPeople( int numPeople, LC::B18TrafficPerson *trafficPersonVec) { int p = blockIdx.x * blockDim.x + threadIdx.x; if (p < numPeople) {//CUDA check (inside margins) trafficPersonVec[p].active = 0; } } void b18GetSampleTrafficCUDA(std::vector<float>& accSpeedPerLinePerTimeInterval, std::vector<float>& numVehPerLinePerTimeInterval) { // copy back people size_t size = accSpeedPerLinePerTimeInterval.size() * sizeof(float); cudaMemcpy(accSpeedPerLinePerTimeInterval.data(), accSpeedPerLinePerTimeInterval_d, size, cudaMemcpyDeviceToHost); size_t sizeI = numVehPerLinePerTimeInterval.size() * sizeof(float); // the destination vector and device buffer hold floats cudaMemcpy(numVehPerLinePerTimeInterval.data(), numVehPerLinePerTimeInterval_d, sizeI, cudaMemcpyDeviceToHost); } void b18ResetPeopleLanesCUDA(uint numPeople) { kernel_resetPeople << < ceil(numPeople / 1024.0f), 1024 >> > (numPeople, trafficPersonVec_d); cudaMemset(&laneMap_d[0], -1, halfLaneMap*sizeof(unsigned char)); cudaMemset(&laneMap_d[halfLaneMap], -1, halfLaneMap*sizeof(unsigned char)); } void b18SimulateTrafficCUDA(float currentTime, uint numPeople, uint numIntersections, float deltaTime, const parameters simParameters, int numBlocks, int threadsPerBlock) { intersectionBench.startMeasuring(); const uint numStepsTogether = 12; //change also in density (10 per hour) // 1.
CHANGE MAP: set map to use and clean the other if (readFirstMapC==true) { mapToReadShift=0; mapToWriteShift=halfLaneMap; gpuErrchk(cudaMemset(&laneMap_d[halfLaneMap], -1, halfLaneMap*sizeof(unsigned char)));//clean second half } else { mapToReadShift=halfLaneMap; mapToWriteShift=0; gpuErrchk(cudaMemset(&laneMap_d[0], -1, halfLaneMap*sizeof(unsigned char)));//clean first half } readFirstMapC=!readFirstMapC;//next iteration invert use // Simulate intersections. kernel_intersectionOneSimulation << < ceil(numIntersections / 512.0f), 512 >> > (numIntersections, currentTime, intersections_d, trafficLights_d); gpuErrchk(cudaPeekAtLastError()); intersectionBench.stopMeasuring(); peopleBench.startMeasuring(); // Simulate people. kernel_trafficSimulation <<< numBlocks, threadsPerBlock>> > (numPeople, currentTime, mapToReadShift, mapToWriteShift, trafficPersonVec_d, indexPathVec_d, indexPathVec_d_size, edgesData_d, edgesData_d_size, laneMap_d, laneMap_d_size, intersections_d, trafficLights_d, trafficLights_d_size, deltaTime, simParameters); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError()); peopleBench.stopMeasuring(); }
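The kernel above advances each vehicle with the Intelligent Driver Model: a desired gap s* is built from the speed, the speed difference to the leader, and the headway time, and the acceleration is a * (1 - (v/vmax)^4 - (s*/s)^2). Below is a compact host-side sketch of that same update, useful for sanity-checking the GPU path; the parameter values and function names are illustrative assumptions, not taken from the simulator.

// Host-side reference of the IDM update used by kernel_trafficSimulation above.
// Parameter values are placeholder assumptions for the demo.
#include <algorithm>
#include <cmath>
#include <cstdio>

float idmAcceleration(float v, float deltaV, float gap, float vMax,
                      float a, float b, float T, float s0) {
  // Desired dynamic gap s*, as in the kernel's 2.1.2 step.
  float sStar = s0 + std::max(0.0f, v * T + (v * deltaV) / (2.0f * std::sqrt(a * b)));
  // dv/dt = a * (1 - (v/vMax)^4 - (s*/gap)^2), as in the kernel's 2.1.3 step.
  return a * (1.0f - std::pow(v / vMax, 4.0f) - std::pow(sStar / gap, 2.0f));
}

int main() {
  float v = 10.0f;        // current speed (m/s)
  const float dt = 0.5f;  // same time step granularity as the kernel
  for (int step = 0; step < 10; ++step) {
    float dvdt = idmAcceleration(v, /*deltaV=*/2.0f, /*gap=*/30.0f,
                                 /*vMax=*/13.9f, /*a=*/1.0f, /*b=*/1.5f,
                                 /*T=*/1.5f, /*s0=*/2.0f);
    v = std::max(0.0f, v + dvdt * dt);
    std::printf("step %d  v = %.2f m/s\n", step, v);
  }
  return 0;
}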
932f5fc95ef2e3afd1d8098ecd0d0940e8ced9e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_CLASS(BNLLLayer); } // namespace caffe
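The .hip file above shows the mechanical part of the port: hipify rewrites CUDA's kernel<<<grid, block, sharedMem, stream>>>(args...) launches into hipLaunchKernelGGL((kernel), grid, block, sharedMem, stream, args...). Below is a minimal CUDA sketch of the pre-conversion form, with the corresponding HIP call shown in a comment; the toy kernel and sizes are illustrative and not part of the Caffe file.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleKernel(int n, const float* in, float* out, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = s * in[i];
}

int main() {
  const int n = 1024;
  float *in, *out;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = float(i);

  // CUDA launch syntax (what the original .cu file uses):
  scaleKernel<<<(n + 255) / 256, 256, 0, 0>>>(n, in, out, 2.0f);
  // hipify rewrites this launch as:
  //   hipLaunchKernelGGL((scaleKernel), dim3((n + 255) / 256), dim3(256), 0, 0,
  //                      n, in, out, 2.0f);

  cudaDeviceSynchronize();
  printf("out[10] = %f\n", out[10]);
  cudaFree(in);
  cudaFree(out);
  return 0;
}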
932f5fc95ef2e3afd1d8098ecd0d0940e8ced9e6.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_CLASS(BNLLLayer); } // namespace caffe
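The forward pass above computes the BNLL transform y = log(1 + exp(x)), branching on the sign of x so that large positive inputs do not overflow exp. Below is a standalone sketch of the same computation on a raw float buffer, with an explicit grid-stride loop standing in for Caffe's CUDA_KERNEL_LOOP / CAFFE_GET_BLOCKS macros; the launch configuration and test values are assumptions for the demo, not Caffe's definitions.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

// Same branch as BNLLForward: x > 0 ? x + log(1 + exp(-x)) : log(1 + exp(x)),
// which evaluates log(1 + exp(x)) without overflowing for large positive x.
__global__ void bnllForwardPlain(int n, const float* in, float* out) {
  // Grid-stride loop, similar in spirit to Caffe's CUDA_KERNEL_LOOP macro.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = in[i] > 0 ? in[i] + logf(1.0f + expf(-in[i]))
                       : logf(1.0f + expf(in[i]));
  }
}

int main() {
  const int n = 8;
  float h_in[n] = {-30.f, -2.f, -0.5f, 0.f, 0.5f, 2.f, 30.f, 100.f};
  float *d_in, *d_out, h_out[n];
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  bnllForwardPlain<<<1, 256>>>(n, d_in, d_out);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("bnll(%g) = %g\n", h_in[i], h_out[i]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}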
8694ef3043e3baa6ad5adb247dc6375608660f91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "segmentation.h" #define THREADS 16 #define thresholdVar 2 __global__ void histogramSum(int* g_idata, int* g_odata) { extern __shared__ int temp[]; int tid = threadIdx.x; temp[tid] = g_idata[tid + blockIdx.x * blockDim.x]; for (int d = blockDim.x >> 1; d > 0; d >>= 1) { __syncthreads(); if (tid < d) temp[tid] += temp[tid + d]; } if (tid == 0) { //printf("temp[0] %d\n", temp[0]); g_odata[blockIdx.x] = temp[0]; } } void reduction_gold(int* odata, int* idata, int len) { *odata = 0; for (int i = 0; i < len; i++) *odata += idata[i]; } __global__ void greyscale(unsigned char* inImg, int* outImg, int width, int height, int channels) { // IMPLEMENTS ALGORITHM FOR 3 CHANNEL GREYSCALE IMAGE //int x = threadIdx.x + blockIdx.x * blockDim.x; //int y = threadIdx.y + blockIdx.y * blockDim.y; //if (x < width && y < height) { // int grayOffset = y * width + x; // int rgbOffset = grayOffset * channels; // unsigned char r = originalImg[rgbOffset]; // unsigned char g = originalImg[rgbOffset + 1]; // unsigned char b = originalImg[rgbOffset + 2]; // int offset = (r + g + b) / channels; // for (int i = 0; i < channels; i++) { // greyImg[rgbOffset + i] = offset; // } //} // IMPLEMENTS ALGORITHM FOR 1 CHANNEL GREYSCALE IMAGE int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < width && y < height) { int grayOffset = y * width + x; int rgbOffset = grayOffset * channels; unsigned char r = inImg[rgbOffset]; unsigned char g = inImg[rgbOffset + 1]; unsigned char b = inImg[rgbOffset + 2]; outImg[grayOffset] = (int)(r + g + b) / 3; //printf("gray offset %d \n", outImg[grayOffset]); } } __device__ void calculateThresholdValues(int mean, int* thresholdValue) { int step = mean / thresholdVar; int currentValue = 0; for (int i = 0; i < thresholdVar; i++) { currentValue += step; *(thresholdValue + i) = currentValue; //thresholdValue[i] = currentValue; } } __device__ void calculateGreyValues(int* greyValue) { int maxValue = 255; int step = maxValue / thresholdVar; int currentValue = 0; for (int i = 0; i < thresholdVar; i++) { currentValue += step; *(greyValue + i) = currentValue; } } __global__ void threshold(int* inImg, unsigned char* outImg, unsigned int width, unsigned int height, int channels, int mean, int variable = 0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int pixel = y * width + x; int value = 0; int thresholdValues[thresholdVar]; int greyValues[thresholdVar]; int* thresholdVariable = &thresholdValues[0]; int* greys = &greyValues[0]; if (variable) { calculateThresholdValues(mean, thresholdVariable); calculateGreyValues(greyValues); } if (x < width && y < height) { if (inImg[pixel] > mean) value = 255; if (variable) { if (inImg[pixel] < mean && inImg[pixel] > * (thresholdVariable + 1)) value = *(greys + variable); if (inImg[pixel] < *(thresholdVariable + 1) && inImg[pixel] > * (thresholdVariable + 0)) value = *(greys + variable - 1); } outImg[pixel * channels] = value; outImg[pixel * channels + 1] = value; outImg[pixel * channels + 2] = value; } } void convertToGreyscale(unsigned char* inImg, int* outImg, int width, int height, int channels) { dim3 dimGrid = dim3((width / THREADS) + 1, (height / THREADS) + 1, 1); dim3 dimBlock = dim3(THREADS, THREADS, 1); unsigned char* d_originalImg = NULL; int* d_greyImg = NULL; int size = width * height; hipMalloc((void**)&d_originalImg, size * channels * sizeof(unsigned char)); 
hipMalloc((void**)&d_greyImg, size * sizeof(int)); hipMemcpy(d_originalImg, inImg, size * channels, hipMemcpyHostToDevice); greyscale << <dimGrid, dimBlock >> > (d_originalImg, d_greyImg, width, height, channels); hipMemcpy(outImg, d_greyImg, size * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_originalImg); hipFree(d_greyImg); } void thresholdFilter(int* inImg, unsigned char* outImg, int width, int height, int channels, int variableThreshold = 0) { dim3 dimGrid = dim3((width / THREADS) + 1, (height / THREADS) + 1, 1); dim3 dimBlock = dim3(THREADS, THREADS, 1); int size = width * height; // CALCULATES HISTOGRAM MEAN VALUE int* host_sum = (int*)malloc(sizeof(int) * size); int* d_idata; int* d_odata; int sharedMemSize = sizeof(int) * THREADS; int sumResult = 0; int mean; // Using histogram sum on host int host_calculated; reduction_gold(&host_calculated, inImg, size); //int* dev_lastBlockCounter; //hipMalloc((void**)&dev_lastBlockCounter, sizeof(int)); //hipMemset(dev_lastBlockCounter, 0, sizeof(int)); hipMalloc((void**)&d_idata, size * sizeof(int)); hipMalloc((void**)&d_odata, size * sizeof(int)); hipMemcpy(d_idata, inImg, size * sizeof(int), hipMemcpyHostToDevice); ////sumCommMultiBlock << <gridSize, blockSize >> > (d_idata, size, d_odata, dev_lastBlockCounter); histogramSum << < 1, size, sharedMemSize >> > (d_idata, d_odata); hipMemcpy(host_sum, d_odata, size * sizeof(int), hipMemcpyDeviceToHost); //reduction_gold(&sumResult, host_sum, size/THREADS); ////hipMemcpy(&d_mean, d_odata, sizeof(int), hipMemcpyDeviceToHost); hipFree(d_odata); hipFree(d_idata); int using_cuda_mean = (int)host_sum / size; mean = (int)host_calculated / size; //printf(" calucalted by device host_sum %d size % d mean %d\n", host_sum[0], size, using_cuda_mean); //printf(" calucalted by host \host_calculated %d size % d mean %d\n", host_calculated, size, mean); int* d_grey = NULL; unsigned char* d_threshold = NULL; hipMalloc((void**)&d_grey, size * sizeof(int)); hipMalloc((void**)&d_threshold, size * channels); hipMemcpy(d_grey, inImg, size * sizeof(int), hipMemcpyHostToDevice); if (variableThreshold) { threshold << <dimGrid, dimBlock >> > (d_grey, d_threshold, width, height, channels, mean, 1); } else { threshold << <dimGrid, dimBlock >> > (d_grey, d_threshold, width, height, channels, mean); } hipMemcpy(outImg, d_threshold, size * channels, hipMemcpyDeviceToHost); hipFree(d_grey); hipFree(d_threshold); }
8694ef3043e3baa6ad5adb247dc6375608660f91.cu
#include "segmentation.h" #define THREADS 16 #define thresholdVar 2 __global__ void histogramSum(int* g_idata, int* g_odata) { extern __shared__ int temp[]; int tid = threadIdx.x; temp[tid] = g_idata[tid + blockIdx.x * blockDim.x]; for (int d = blockDim.x >> 1; d > 0; d >>= 1) { __syncthreads(); if (tid < d) temp[tid] += temp[tid + d]; } if (tid == 0) { //printf("temp[0] %d\n", temp[0]); g_odata[blockIdx.x] = temp[0]; } } void reduction_gold(int* odata, int* idata, int len) { *odata = 0; for (int i = 0; i < len; i++) *odata += idata[i]; } __global__ void greyscale(unsigned char* inImg, int* outImg, int width, int height, int channels) { // IMPLEMENTS ALGORITHM FOR 3 CHANNEL GREYSCALE IMAGE //int x = threadIdx.x + blockIdx.x * blockDim.x; //int y = threadIdx.y + blockIdx.y * blockDim.y; //if (x < width && y < height) { // int grayOffset = y * width + x; // int rgbOffset = grayOffset * channels; // unsigned char r = originalImg[rgbOffset]; // unsigned char g = originalImg[rgbOffset + 1]; // unsigned char b = originalImg[rgbOffset + 2]; // int offset = (r + g + b) / channels; // for (int i = 0; i < channels; i++) { // greyImg[rgbOffset + i] = offset; // } //} // IMPLEMENTS ALGORITHM FOR 1 CHANNEL GREYSCALE IMAGE int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < width && y < height) { int grayOffset = y * width + x; int rgbOffset = grayOffset * channels; unsigned char r = inImg[rgbOffset]; unsigned char g = inImg[rgbOffset + 1]; unsigned char b = inImg[rgbOffset + 2]; outImg[grayOffset] = (int)(r + g + b) / 3; //printf("gray offset %d \n", outImg[grayOffset]); } } __device__ void calculateThresholdValues(int mean, int* thresholdValue) { int step = mean / thresholdVar; int currentValue = 0; for (int i = 0; i < thresholdVar; i++) { currentValue += step; *(thresholdValue + i) = currentValue; //thresholdValue[i] = currentValue; } } __device__ void calculateGreyValues(int* greyValue) { int maxValue = 255; int step = maxValue / thresholdVar; int currentValue = 0; for (int i = 0; i < thresholdVar; i++) { currentValue += step; *(greyValue + i) = currentValue; } } __global__ void threshold(int* inImg, unsigned char* outImg, unsigned int width, unsigned int height, int channels, int mean, int variable = 0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int pixel = y * width + x; int value = 0; int thresholdValues[thresholdVar]; int greyValues[thresholdVar]; int* thresholdVariable = &thresholdValues[0]; int* greys = &greyValues[0]; if (variable) { calculateThresholdValues(mean, thresholdVariable); calculateGreyValues(greyValues); } if (x < width && y < height) { if (inImg[pixel] > mean) value = 255; if (variable) { if (inImg[pixel] < mean && inImg[pixel] > * (thresholdVariable + 1)) value = *(greys + variable); if (inImg[pixel] < *(thresholdVariable + 1) && inImg[pixel] > * (thresholdVariable + 0)) value = *(greys + variable - 1); } outImg[pixel * channels] = value; outImg[pixel * channels + 1] = value; outImg[pixel * channels + 2] = value; } } void convertToGreyscale(unsigned char* inImg, int* outImg, int width, int height, int channels) { dim3 dimGrid = dim3((width / THREADS) + 1, (height / THREADS) + 1, 1); dim3 dimBlock = dim3(THREADS, THREADS, 1); unsigned char* d_originalImg = NULL; int* d_greyImg = NULL; int size = width * height; cudaMalloc((void**)&d_originalImg, size * channels * sizeof(unsigned char)); cudaMalloc((void**)&d_greyImg, size * sizeof(int)); cudaMemcpy(d_originalImg, inImg, size * 
channels, cudaMemcpyHostToDevice); greyscale << <dimGrid, dimBlock >> > (d_originalImg, d_greyImg, width, height, channels); cudaMemcpy(outImg, d_greyImg, size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_originalImg); cudaFree(d_greyImg); } void thresholdFilter(int* inImg, unsigned char* outImg, int width, int height, int channels, int variableThreshold = 0) { dim3 dimGrid = dim3((width / THREADS) + 1, (height / THREADS) + 1, 1); dim3 dimBlock = dim3(THREADS, THREADS, 1); int size = width * height; // CALCULATES HISTOGRAM MEAN VALUE int* host_sum = (int*)malloc(sizeof(int) * size); int* d_idata; int* d_odata; int sharedMemSize = sizeof(int) * THREADS; int sumResult = 0; int mean; // Using histogram sum on host int host_calculated; reduction_gold(&host_calculated, inImg, size); //int* dev_lastBlockCounter; //cudaMalloc((void**)&dev_lastBlockCounter, sizeof(int)); //cudaMemset(dev_lastBlockCounter, 0, sizeof(int)); cudaMalloc((void**)&d_idata, size * sizeof(int)); cudaMalloc((void**)&d_odata, size * sizeof(int)); cudaMemcpy(d_idata, inImg, size * sizeof(int), cudaMemcpyHostToDevice); ////sumCommMultiBlock << <gridSize, blockSize >> > (d_idata, size, d_odata, dev_lastBlockCounter); histogramSum << < 1, size, sharedMemSize >> > (d_idata, d_odata); cudaMemcpy(host_sum, d_odata, size * sizeof(int), cudaMemcpyDeviceToHost); //reduction_gold(&sumResult, host_sum, size/THREADS); ////cudaMemcpy(&d_mean, d_odata, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_odata); cudaFree(d_idata); int using_cuda_mean = (int)host_sum / size; mean = (int)host_calculated / size; //printf(" calucalted by device host_sum %d size % d mean %d\n", host_sum[0], size, using_cuda_mean); //printf(" calucalted by host \host_calculated %d size % d mean %d\n", host_calculated, size, mean); int* d_grey = NULL; unsigned char* d_threshold = NULL; cudaMalloc((void**)&d_grey, size * sizeof(int)); cudaMalloc((void**)&d_threshold, size * channels); cudaMemcpy(d_grey, inImg, size * sizeof(int), cudaMemcpyHostToDevice); if (variableThreshold) { threshold << <dimGrid, dimBlock >> > (d_grey, d_threshold, width, height, channels, mean, 1); } else { threshold << <dimGrid, dimBlock >> > (d_grey, d_threshold, width, height, channels, mean); } cudaMemcpy(outImg, d_threshold, size * channels, cudaMemcpyDeviceToHost); cudaFree(d_grey); cudaFree(d_threshold); }
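thresholdFilter above launches histogramSum as a single block of width*height threads with only THREADS ints of shared memory, which for any realistic image exceeds both the per-block thread limit and the shared allocation, so the mean that is actually used comes from the host-side reduction_gold pass. Below is a sketch of the multi-block pattern the kernel's tree reduction is written for: one partial sum per block of THREADS threads, finished on the host. The image size and buffer names are illustrative assumptions (and the tile count is assumed to divide the image size evenly).

#include <cstdio>
#include <cuda_runtime.h>
#define THREADS 16

// Same shared-memory tree reduction as histogramSum above: one partial sum per block.
__global__ void blockSum(const int* g_idata, int* g_odata) {
  extern __shared__ int temp[];
  int tid = threadIdx.x;
  temp[tid] = g_idata[tid + blockIdx.x * blockDim.x];
  for (int d = blockDim.x >> 1; d > 0; d >>= 1) {
    __syncthreads();
    if (tid < d) temp[tid] += temp[tid + d];
  }
  if (tid == 0) g_odata[blockIdx.x] = temp[0];
}

int main() {
  const int width = 64, height = 64, size = width * height;  // assumes size % THREADS == 0
  int* h_img = new int[size];
  for (int i = 0; i < size; ++i) h_img[i] = i % 256;          // synthetic grey image

  const int numBlocks = size / THREADS;
  int *d_in, *d_partial;
  int* h_partial = new int[numBlocks];
  cudaMalloc(&d_in, size * sizeof(int));
  cudaMalloc(&d_partial, numBlocks * sizeof(int));
  cudaMemcpy(d_in, h_img, size * sizeof(int), cudaMemcpyHostToDevice);

  // One block per THREADS-wide tile, THREADS ints of shared memory per block.
  blockSum<<<numBlocks, THREADS, THREADS * sizeof(int)>>>(d_in, d_partial);
  cudaMemcpy(h_partial, d_partial, numBlocks * sizeof(int), cudaMemcpyDeviceToHost);

  long long total = 0;
  for (int b = 0; b < numBlocks; ++b) total += h_partial[b];  // host-side finish
  printf("mean grey value = %lld\n", total / size);

  cudaFree(d_in); cudaFree(d_partial);
  delete[] h_img; delete[] h_partial;
  return 0;
}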
f8049670eb51b4e3082869ea05d0bc244124c42a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <vector> /* -------------------------------------------------------------------- Se da o matrice ce contine N puncte 3-dimesionale (deci o matrice cu N linii si 3 coloane). Implementati un program CUDA ce calculeaza distanta intre un punct 3-dimensional p0 dat si fiecare din cele N puncte din matrice. Urmati, pe rand, pasii de mai jos. --------------------------------------------------------------------*/ __global__ void compute_distance(float *p, float p0_x, float p0_y, float p0_z, float *distante, int N); int main() { // Numarul N de puncte int N = 1000; // Matricea ce contine punctele float *p_h; // punctul p0 float p0_x = 0; float p0_y = 0; float p0_z = 0; // Distantele intre p0 si fiecare p_i float *distante_h; /* -------------------------------------------------------------------- 1. Alocati memorie pe host pentru matricea p_h si vectorul distante_h dupa cum urmeaza: * Matricea este de N linii si 3 coloane si este alocata ca un singur bloc de memorie (o singura alocare de N*3 elemente) * Vectorul ce contine distantele are N elemente 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 1 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ p_h = new float[N * 3]; distante_h = new float[N]; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 2. Initializati matricea ce contine punctele dupa cum urmeaza: * Primul punct (adica prima linie din matrice) are coordonatele 0,0,0, al 2-lea punct are 1,1,1, al 3-lea 2,2,2, etc 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 2 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ for (int i = 0; i < N; i++) { for (int j = 0; j < 3; j++) { p_h[i * 3 + j] = i; } } //verificare, nu face parte din rezolvare for (int i = 0; i < 10; i++) { std::cout << p_h[i * 3] << " " << p_h[i * 3 + 1] << " " << p_h[i * 3 + 2] << std::endl; } /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Matricea si cei doi vectori pentru memoria device float *p_d; float *distante_d; /* -------------------------------------------------------------------- 3. Alocati memorie pe device pentru matrice si vectorul ce contine distantele 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 3 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ hipMalloc((void**)&p_d, N * 3 * sizeof(float)); hipMalloc((void**)&distante_d, N * sizeof(float)); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 4. 
Copiati continutul matricei si vectorului b de pe host pe device 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 4 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ hipMemcpy(p_d, p_h, N * 3 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(distante_d, distante_h, N * sizeof(float), hipMemcpyHostToDevice); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Numarul de thread-uri pe bloc dim3 threadsPerBlock; threadsPerBlock.x = 128; threadsPerBlock.y = 1; threadsPerBlock.z = 1; // Numarul de blocuri dim3 numBlocks; /* -------------------------------------------------------------------- 5. Calculati numarul de blocuri astfel incat: * se obtine un grid 1D de thread-uri * Numarul de thread-uri lansate in executie exte mai mare sau egal cu N 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 5 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ numBlocks.x = N / threadsPerBlock.x; numBlocks.y = 1; numBlocks.z = 1; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Se lanseaza in executie kernel-ul cuda compute_distance << <numBlocks, threadsPerBlock >> > (p_d, p0_x, p0_y, p0_z, distante_d, N); /* -------------------------------------------------------------------- 6. Copiati continutul vectorului ce contine distantele calculate de pe device pe host 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 6 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ hipMemcpy(distante_h, distante_d, N * sizeof(float), hipMemcpyDeviceToHost); //verificare for (int i = 0; i < 10; i++) { std::cout << distante_h[i] << std::endl; } /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ return 0; } __global__ void compute_distance(float *p, float p0_x, float p0_y, float p0_z, float *distances, int N) { // Indicele fiecarui thread int index; /* -------------------------------------------------------------------- 7. Calculati valoarea indicelui utilizand variabilele predefinite blockDim blocIdx si threadIdx astfel incat: thread-ul zero are index 0, thread-ul 1 are indicele 1, thread-ul 2 are indicele 2, etc. 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 7 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ index = blockDim.x*blockIdx.x + threadIdx.x; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 8. Calculti distanta intre punctul p0 si fiecare punct din matricea p0 2 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 8 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ float pi_x = p[index * 3]; float pi_y = p[index * 3 + 1]; float pi_z = p[index * 3 + 2]; distances[index] = sqrt(pow(pi_x - p0_x, 2) + pow(pi_y - p0_y, 2) + pow(pi_z - p0_z, 2)); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ }
f8049670eb51b4e3082869ea05d0bc244124c42a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <vector> /* -------------------------------------------------------------------- Se da o matrice ce contine N puncte 3-dimesionale (deci o matrice cu N linii si 3 coloane). Implementati un program CUDA ce calculeaza distanta intre un punct 3-dimensional p0 dat si fiecare din cele N puncte din matrice. Urmati, pe rand, pasii de mai jos. --------------------------------------------------------------------*/ __global__ void compute_distance(float *p, float p0_x, float p0_y, float p0_z, float *distante, int N); int main() { // Numarul N de puncte int N = 1000; // Matricea ce contine punctele float *p_h; // punctul p0 float p0_x = 0; float p0_y = 0; float p0_z = 0; // Distantele intre p0 si fiecare p_i float *distante_h; /* -------------------------------------------------------------------- 1. Alocati memorie pe host pentru matricea p_h si vectorul distante_h dupa cum urmeaza: * Matricea este de N linii si 3 coloane si este alocata ca un singur bloc de memorie (o singura alocare de N*3 elemente) * Vectorul ce contine distantele are N elemente 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 1 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ p_h = new float[N * 3]; distante_h = new float[N]; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 2. Initializati matricea ce contine punctele dupa cum urmeaza: * Primul punct (adica prima linie din matrice) are coordonatele 0,0,0, al 2-lea punct are 1,1,1, al 3-lea 2,2,2, etc 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 2 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ for (int i = 0; i < N; i++) { for (int j = 0; j < 3; j++) { p_h[i * 3 + j] = i; } } //verificare, nu face parte din rezolvare for (int i = 0; i < 10; i++) { std::cout << p_h[i * 3] << " " << p_h[i * 3 + 1] << " " << p_h[i * 3 + 2] << std::endl; } /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Matricea si cei doi vectori pentru memoria device float *p_d; float *distante_d; /* -------------------------------------------------------------------- 3. Alocati memorie pe device pentru matrice si vectorul ce contine distantele 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 3 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ cudaMalloc((void**)&p_d, N * 3 * sizeof(float)); cudaMalloc((void**)&distante_d, N * sizeof(float)); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 4. 
Copiati continutul matricei si vectorului b de pe host pe device 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 4 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ cudaMemcpy(p_d, p_h, N * 3 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(distante_d, distante_h, N * sizeof(float), cudaMemcpyHostToDevice); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Numarul de thread-uri pe bloc dim3 threadsPerBlock; threadsPerBlock.x = 128; threadsPerBlock.y = 1; threadsPerBlock.z = 1; // Numarul de blocuri dim3 numBlocks; /* -------------------------------------------------------------------- 5. Calculati numarul de blocuri astfel incat: * se obtine un grid 1D de thread-uri * Numarul de thread-uri lansate in executie exte mai mare sau egal cu N 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 5 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ numBlocks.x = N / threadsPerBlock.x; numBlocks.y = 1; numBlocks.z = 1; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ // Se lanseaza in executie kernel-ul cuda compute_distance << <numBlocks, threadsPerBlock >> > (p_d, p0_x, p0_y, p0_z, distante_d, N); /* -------------------------------------------------------------------- 6. Copiati continutul vectorului ce contine distantele calculate de pe device pe host 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 6 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ cudaMemcpy(distante_h, distante_d, N * sizeof(float), cudaMemcpyDeviceToHost); //verificare for (int i = 0; i < 10; i++) { std::cout << distante_h[i] << std::endl; } /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ return 0; } __global__ void compute_distance(float *p, float p0_x, float p0_y, float p0_z, float *distances, int N) { // Indicele fiecarui thread int index; /* -------------------------------------------------------------------- 7. Calculati valoarea indicelui utilizand variabilele predefinite blockDim blocIdx si threadIdx astfel incat: thread-ul zero are index 0, thread-ul 1 are indicele 1, thread-ul 2 are indicele 2, etc. 0.7142 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 7 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ index = blockDim.x*blockIdx.x + threadIdx.x; /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ /* -------------------------------------------------------------------- 8. Calculti distanta intre punctul p0 si fiecare punct din matricea p0 2 puncte ----------------------------------------------------------------------*/ /* ------------------------ REZOLVARE PCT 8 -----------------------------*/ /* ------------------------ CODUL TAU DE AICI ---------------------------*/ float pi_x = p[index * 3]; float pi_y = p[index * 3 + 1]; float pi_z = p[index * 3 + 2]; distances[index] = sqrt(pow(pi_x - p0_x, 2) + pow(pi_y - p0_y, 2) + pow(pi_z - p0_z, 2)); /* ------------------------ CODUL TAU PANA AICI ---------------------------*/ }
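The exercise above requires at least N threads, but numBlocks.x = N / threadsPerBlock.x rounds down (896 threads for N = 1000) and the kernel indexes without a guard, so the last points are never processed. Below is a minimal sketch of the usual pattern: round the block count up and bounds-check inside the kernel. The data and the point p0 mirror the exercise, while the function and variable names are illustrative.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void computeDistance(const float* p, float p0x, float p0y, float p0z,
                                float* dist, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;  // guard the extra threads in the last block
  float dx = p[i * 3 + 0] - p0x;
  float dy = p[i * 3 + 1] - p0y;
  float dz = p[i * 3 + 2] - p0z;
  dist[i] = sqrtf(dx * dx + dy * dy + dz * dz);
}

int main() {
  const int n = 1000;
  float* h_p = new float[n * 3];
  float* h_d = new float[n];
  for (int i = 0; i < n; ++i) h_p[i * 3] = h_p[i * 3 + 1] = h_p[i * 3 + 2] = float(i);

  float *d_p, *d_dist;
  cudaMalloc(&d_p, n * 3 * sizeof(float));
  cudaMalloc(&d_dist, n * sizeof(float));
  cudaMemcpy(d_p, h_p, n * 3 * sizeof(float), cudaMemcpyHostToDevice);

  const int block = 128;
  const int grid = (n + block - 1) / block;  // ceil(n / block): covers all n points
  computeDistance<<<grid, block>>>(d_p, 0.f, 0.f, 0.f, d_dist, n);
  cudaMemcpy(h_d, d_dist, n * sizeof(float), cudaMemcpyDeviceToHost);

  printf("dist[999] = %f (expected %f)\n", h_d[999], 999.0f * sqrtf(3.0f));
  cudaFree(d_p); cudaFree(d_dist);
  delete[] h_p; delete[] h_d;
  return 0;
}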
fb36f1049e1f507f48ab346d55cb373d70879ba2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cassert> #include <cstring> #include <vector> #include "paddle/fluid/inference/tensorrt/plugin/special_slice_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { #if IS_TRT_VERSION_GE(6000) SpecialSlicePluginDynamic::SpecialSlicePluginDynamic() {} SpecialSlicePluginDynamic::SpecialSlicePluginDynamic(void const* serial_data, size_t serial_length) {} SpecialSlicePluginDynamic::~SpecialSlicePluginDynamic() {} nvinfer1::IPluginV2DynamicExt* SpecialSlicePluginDynamic::clone() const { return new SpecialSlicePluginDynamic(); } const char* SpecialSlicePluginDynamic::getPluginType() const { return "special_slice_plugin"; } int SpecialSlicePluginDynamic::getNbOutputs() const { return 1; } int SpecialSlicePluginDynamic::initialize() { return 0; } size_t SpecialSlicePluginDynamic::getSerializationSize() const { size_t serialize_size = 0; return serialize_size; } void SpecialSlicePluginDynamic::serialize(void* buffer) const {} nvinfer1::DimsExprs SpecialSlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs, nvinfer1::IExprBuilder& expr_builder) { nvinfer1::DimsExprs output(inputs[0]); auto one = expr_builder.constant(1); output.d[0] = expr_builder.operation(nvinfer1::DimensionOperation::kSUB, *inputs[1].d[0], *one); return output; } void SpecialSlicePluginDynamic::configurePlugin( const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) {} size_t SpecialSlicePluginDynamic::getWorkspaceSize( const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const { return 0; } void SpecialSlicePluginDynamic::destroy() { delete this; } void SpecialSlicePluginDynamic::terminate() {} bool SpecialSlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* desc, int nb_inputs, int nb_outputs) { if (pos == 0) // slice tensor return (desc[pos].type == nvinfer1::DataType::kHALF && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); // || desc[pos].type == // nvinfer1::DataType::kFLOAT); if (pos == 1) // cu_seqlen return (desc[pos].type == nvinfer1::DataType::kINT32 && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); return (desc[pos].type == nvinfer1::DataType::kHALF && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); // || desc[pos].type == // nvinfer1::DataType::kFLOAT); } nvinfer1::DataType SpecialSlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType* input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The index should be equal to 0")); return input_types[0]; } template <typename T> __global__ void SpecialSliceKernel(const T* 
slice_input, const int32_t* cu_seqlens, T* output) { const int hidden = blockDim.x; const int batch = blockIdx.x; output[batch * hidden + threadIdx.x] = slice_input[cu_seqlens[batch] * hidden + threadIdx.x]; } int SpecialSlicePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc* input_desc, const nvinfer1::PluginTensorDesc* output_desc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { auto input_dims = input_desc[0].dims; // (sum(S), 768, 1, 1) auto out_dims = output_desc[0].dims; // (batch, 768, 1, 1) assert(input_desc[0].type == nvinfer1::DataType::kHALF); const int32_t hidden = input_dims.d[1]; const int num_blocks = out_dims.d[0]; // batch size const int num_threads = hidden; const half* slice_input = static_cast<const half*>(inputs[0]); const int32_t* cu_seqlens = static_cast<const int32_t*>(inputs[1]); half* output = static_cast<half*>(outputs[0]); hipLaunchKernelGGL(( SpecialSliceKernel), dim3(num_blocks), dim3(num_threads), 0, stream, slice_input, cu_seqlens, output); return hipGetLastError() != hipSuccess; } SpecialSlicePluginDynamicCreator::SpecialSlicePluginDynamicCreator() {} const char* SpecialSlicePluginDynamicCreator::getPluginName() const { return "special_slice_plugin"; } const char* SpecialSlicePluginDynamicCreator::getPluginVersion() const { return "1"; } const nvinfer1::PluginFieldCollection* SpecialSlicePluginDynamicCreator::getFieldNames() { return &field_collection_; } nvinfer1::IPluginV2* SpecialSlicePluginDynamicCreator::createPlugin( const char* name, const nvinfer1::PluginFieldCollection* fc) { return new SpecialSlicePluginDynamic(); } nvinfer1::IPluginV2* SpecialSlicePluginDynamicCreator::deserializePlugin( const char* name, const void* serial_data, size_t serial_length) { auto plugin = new SpecialSlicePluginDynamic(serial_data, serial_length); return plugin; } void SpecialSlicePluginDynamicCreator::setPluginNamespace( const char* lib_namespace) { plugin_namespace_ = lib_namespace; } const char* SpecialSlicePluginDynamicCreator::getPluginNamespace() const { return plugin_namespace_.c_str(); } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
fb36f1049e1f507f48ab346d55cb373d70879ba2.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cassert> #include <cstring> #include <vector> #include "paddle/fluid/inference/tensorrt/plugin/special_slice_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { #if IS_TRT_VERSION_GE(6000) SpecialSlicePluginDynamic::SpecialSlicePluginDynamic() {} SpecialSlicePluginDynamic::SpecialSlicePluginDynamic(void const* serial_data, size_t serial_length) {} SpecialSlicePluginDynamic::~SpecialSlicePluginDynamic() {} nvinfer1::IPluginV2DynamicExt* SpecialSlicePluginDynamic::clone() const { return new SpecialSlicePluginDynamic(); } const char* SpecialSlicePluginDynamic::getPluginType() const { return "special_slice_plugin"; } int SpecialSlicePluginDynamic::getNbOutputs() const { return 1; } int SpecialSlicePluginDynamic::initialize() { return 0; } size_t SpecialSlicePluginDynamic::getSerializationSize() const { size_t serialize_size = 0; return serialize_size; } void SpecialSlicePluginDynamic::serialize(void* buffer) const {} nvinfer1::DimsExprs SpecialSlicePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs* inputs, int nb_inputs, nvinfer1::IExprBuilder& expr_builder) { nvinfer1::DimsExprs output(inputs[0]); auto one = expr_builder.constant(1); output.d[0] = expr_builder.operation(nvinfer1::DimensionOperation::kSUB, *inputs[1].d[0], *one); return output; } void SpecialSlicePluginDynamic::configurePlugin( const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) {} size_t SpecialSlicePluginDynamic::getWorkspaceSize( const nvinfer1::PluginTensorDesc* inputs, int nbInputs, const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const { return 0; } void SpecialSlicePluginDynamic::destroy() { delete this; } void SpecialSlicePluginDynamic::terminate() {} bool SpecialSlicePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc* desc, int nb_inputs, int nb_outputs) { if (pos == 0) // slice tensor return (desc[pos].type == nvinfer1::DataType::kHALF && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); // || desc[pos].type == // nvinfer1::DataType::kFLOAT); if (pos == 1) // cu_seqlen return (desc[pos].type == nvinfer1::DataType::kINT32 && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); return (desc[pos].type == nvinfer1::DataType::kHALF && desc[pos].format == nvinfer1::TensorFormat::kLINEAR); // || desc[pos].type == // nvinfer1::DataType::kFLOAT); } nvinfer1::DataType SpecialSlicePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType* input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The index should be equal to 0")); return input_types[0]; } template <typename T> __global__ void SpecialSliceKernel(const T* slice_input, const int32_t* cu_seqlens, T* output) { const int hidden = blockDim.x; const 
int batch = blockIdx.x; output[batch * hidden + threadIdx.x] = slice_input[cu_seqlens[batch] * hidden + threadIdx.x]; } int SpecialSlicePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc* input_desc, const nvinfer1::PluginTensorDesc* output_desc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { auto input_dims = input_desc[0].dims; // (sum(S), 768, 1, 1) auto out_dims = output_desc[0].dims; // (batch, 768, 1, 1) assert(input_desc[0].type == nvinfer1::DataType::kHALF); const int32_t hidden = input_dims.d[1]; const int num_blocks = out_dims.d[0]; // batch size const int num_threads = hidden; const half* slice_input = static_cast<const half*>(inputs[0]); const int32_t* cu_seqlens = static_cast<const int32_t*>(inputs[1]); half* output = static_cast<half*>(outputs[0]); SpecialSliceKernel<<<num_blocks, num_threads, 0, stream>>>( slice_input, cu_seqlens, output); return cudaGetLastError() != cudaSuccess; } SpecialSlicePluginDynamicCreator::SpecialSlicePluginDynamicCreator() {} const char* SpecialSlicePluginDynamicCreator::getPluginName() const { return "special_slice_plugin"; } const char* SpecialSlicePluginDynamicCreator::getPluginVersion() const { return "1"; } const nvinfer1::PluginFieldCollection* SpecialSlicePluginDynamicCreator::getFieldNames() { return &field_collection_; } nvinfer1::IPluginV2* SpecialSlicePluginDynamicCreator::createPlugin( const char* name, const nvinfer1::PluginFieldCollection* fc) { return new SpecialSlicePluginDynamic(); } nvinfer1::IPluginV2* SpecialSlicePluginDynamicCreator::deserializePlugin( const char* name, const void* serial_data, size_t serial_length) { auto plugin = new SpecialSlicePluginDynamic(serial_data, serial_length); return plugin; } void SpecialSlicePluginDynamicCreator::setPluginNamespace( const char* lib_namespace) { plugin_namespace_ = lib_namespace; } const char* SpecialSlicePluginDynamicCreator::getPluginNamespace() const { return plugin_namespace_.c_str(); } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
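The plugin above gathers one hidden-sized row per sequence from a packed (sum(S), hidden) tensor, using cu_seqlens as the row offset of each sequence start (in practice the first-token rows of a variable-length batch). Below is a standalone float sketch of the same gather outside TensorRT; the shapes are illustrative and the real plugin operates on half precision.

#include <cstdio>
#include <cuda_runtime.h>

// One block per sequence, one thread per hidden element, as in SpecialSliceKernel.
__global__ void gatherSequenceStarts(const float* packed, const int* cuSeqlens,
                                     float* out) {
  const int hidden = blockDim.x;
  const int batch = blockIdx.x;
  out[batch * hidden + threadIdx.x] =
      packed[cuSeqlens[batch] * hidden + threadIdx.x];
}

int main() {
  const int hidden = 8, batch = 3;
  const int hCu[batch + 1] = {0, 4, 9, 12};  // 3 sequences of length 4, 5, 3
  const int totalTokens = hCu[batch];
  float* hPacked = new float[totalTokens * hidden];
  for (int t = 0; t < totalTokens; ++t)
    for (int h = 0; h < hidden; ++h) hPacked[t * hidden + h] = float(t);

  float *dPacked, *dOut;
  int* dCu;
  cudaMalloc(&dPacked, totalTokens * hidden * sizeof(float));
  cudaMalloc(&dOut, batch * hidden * sizeof(float));
  cudaMalloc(&dCu, (batch + 1) * sizeof(int));
  cudaMemcpy(dPacked, hPacked, totalTokens * hidden * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dCu, hCu, (batch + 1) * sizeof(int), cudaMemcpyHostToDevice);

  gatherSequenceStarts<<<batch, hidden>>>(dPacked, dCu, dOut);

  float hOut[batch * hidden];
  cudaMemcpy(hOut, dOut, sizeof(hOut), cudaMemcpyDeviceToHost);
  for (int b = 0; b < batch; ++b)
    printf("row for sequence %d starts with token value %g\n", b, hOut[b * hidden]);

  delete[] hPacked;
  cudaFree(dPacked); cudaFree(dOut); cudaFree(dCu);
  return 0;
}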