Dataset columns (one row per hipify-translated file pair):

Column          Type     Length (min - max)
hip_filename    string   5 - 84
hip_content     string   79 - 9.69M
cuda_filename   string   4 - 83
cuda_content    string   19 - 9.69M
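Each row pairs a HIP source file produced by the hipify tool (hip_filename, hip_content) with the CUDA source it was generated from (cuda_filename, cuda_content). A minimal sketch of one record as a C++ struct; the struct and field names simply mirror the column names above and are hypothetical, not part of any published API:

```cpp
#include <string>

// Hypothetical in-memory view of one dataset row: a hipified translation
// paired with the CUDA original it was generated from.
struct HipifyPair {
    std::string hip_filename;   // e.g. "<hash>.hip" (5-84 chars)
    std::string hip_content;    // hipified source text (79 B - 9.69 MB)
    std::string cuda_filename;  // e.g. "<hash>.cu" (4-83 chars)
    std::string cuda_content;   // original CUDA source text (19 B - 9.69 MB)
};
```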
4f549f53b53797eb79c2d93403f6616bd3248b85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <sys/time.h> __global__ void saxpyDevice(int n, float a, float *x, float *y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } void saxpy(int n, float a, float *x, float *y){ float *d_x, *d_y; // allocate GPU memory, and upload data hipMalloc(&d_x, n*sizeof(float)); hipMalloc(&d_y, n*sizeof(float)); hipMemcpy(d_x, x, n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, n*sizeof(float), hipMemcpyHostToDevice); // send instructions to GPU hipLaunchKernelGGL(( saxpyDevice), dim3((n+255)/256), dim3(256), 0, 0, n, 2.0f, d_x, d_y); // download data, and free GPU memory hipMemcpy(y, d_y, n*sizeof(float), hipMemcpyDeviceToHost); hipFree(d_x); hipFree(d_y); } int main(void){ int N = 1<<20; float *x, *y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); struct timeval t0, t1; gettimeofday(&t0, NULL); for (int i=0; i<100; i++) saxpy(N, 2.0f, x, y); gettimeofday(&t1, NULL); std::cout<<"CUDA = "<<(t1.tv_sec - t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000<<"ms"<<std::endl; return 0; }
4f549f53b53797eb79c2d93403f6616bd3248b85.cu
#include <iostream> #include <sys/time.h> __global__ void saxpyDevice(int n, float a, float *x, float *y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } void saxpy(int n, float a, float *x, float *y){ float *d_x, *d_y; // allocate GPU memory, and upload data cudaMalloc(&d_x, n*sizeof(float)); cudaMalloc(&d_y, n*sizeof(float)); cudaMemcpy(d_x, x, n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, n*sizeof(float), cudaMemcpyHostToDevice); // send instructions to GPU saxpyDevice<<<(n+255)/256, 256>>>(n, 2.0f, d_x, d_y); // download data, and free GPU memory cudaMemcpy(y, d_y, n*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_x); cudaFree(d_y); } int main(void){ int N = 1<<20; float *x, *y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); struct timeval t0, t1; gettimeofday(&t0, NULL); for (int i=0; i<100; i++) saxpy(N, 2.0f, x, y); gettimeofday(&t1, NULL); std::cout<<"CUDA = "<<(t1.tv_sec - t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000<<"ms"<<std::endl; return 0; }
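The SAXPY pair above shows the characteristic edits hipify makes: it prepends `#include "hip/hip_runtime.h"`, renames runtime calls from the `cuda` prefix to `hip` (hipMalloc, hipMemcpy, hipFree), and rewrites the triple-chevron kernel launch as a call to the `hipLaunchKernelGGL` macro. A side-by-side excerpt of that launch rewrite, taken from the row above (a fragment for illustration, not standalone code):

```cpp
// CUDA original: one thread per element, 256 threads per block.
saxpyDevice<<<(n + 255) / 256, 256>>>(n, 2.0f, d_x, d_y);

// Hipified equivalent: same grid and block shape; the 4th and 5th macro
// arguments are dynamic shared-memory bytes (0) and the default stream (0).
hipLaunchKernelGGL(saxpyDevice, dim3((n + 255) / 256), dim3(256), 0, 0,
                   n, 2.0f, d_x, d_y);
```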
b8d472f374b42911217a24616f1e2b50be25409d.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> #include "common.hpp" #include "simulation.hpp" #include "graphics.hpp" Renderer* renderer; Params sim_params{}; size_t milisec = 25; void draw_scene() { // delegate to renderer if (renderer != nullptr) renderer->draw_scene(); } void handle_resize(int w, int h) { // delegate to renderer if (renderer != nullptr) renderer->handle_resize(w, h); } void handle_keypress(unsigned char key, int x, int y) { // delegate to renderer if (renderer != nullptr) renderer->handle_keypress(key, x, y); } void keyboard_navigator(int key, int x, int y) { // delegate to renderer if (renderer != nullptr) renderer->keyboard_navigator(key, x, y); } void update(int val) { if (renderer != nullptr) renderer->make_step(val); glutPostRedisplay(); glutTimerFunc(sim_params.display_dt, update, 0); } int main(int argc, char** argv) { // create default params set // parse from user input if any assert(init(argc, argv, sim_params) == 0); // initialize OpenGL DBG_MSG("Initializing GLUT graphics"); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(400, 400); glutCreateWindow("Nbody Simulation"); DBG_MSG("Initializing CUDA context."); cuda_check(hipSetDevice(0)); cuda_check(hipGLSetGLDevice(0)); hipDeviceSynchronize(); // Set handler functions for drawing, keypresses, and window resizes glutDisplayFunc(draw_scene); glutKeyboardFunc(handle_keypress); glutSpecialFunc(keyboard_navigator); glutReshapeFunc(handle_resize); glewInit(); Simulator* simulator; // init the simulator on device init_simulator << <1, 1>>> (simulator, sim_params, time(NULL)); cuda_check(hipDeviceSynchronize()); DBG_MSG("Simulator initializaiton completed"); // init renderer and assigne simulator to it! DBG_MSG("Initializing renderer"); renderer = new Renderer(sim_params, simulator); renderer->init_graphics(); init_simulation_data << <1, 1>>> (simulator); cuda_check(hipDeviceSynchronize()); DBG_MSG("Starting glut loop"); glutMainLoop(); free_simulator << <1, 1>>> (simulator); hipDeviceReset(); return 0; }
b8d472f374b42911217a24616f1e2b50be25409d.cu
#include <time.h> #include "common.hpp" #include "simulation.hpp" #include "graphics.hpp" Renderer* renderer; Params sim_params{}; size_t milisec = 25; void draw_scene() { // delegate to renderer if (renderer != nullptr) renderer->draw_scene(); } void handle_resize(int w, int h) { // delegate to renderer if (renderer != nullptr) renderer->handle_resize(w, h); } void handle_keypress(unsigned char key, int x, int y) { // delegate to renderer if (renderer != nullptr) renderer->handle_keypress(key, x, y); } void keyboard_navigator(int key, int x, int y) { // delegate to renderer if (renderer != nullptr) renderer->keyboard_navigator(key, x, y); } void update(int val) { if (renderer != nullptr) renderer->make_step(val); glutPostRedisplay(); glutTimerFunc(sim_params.display_dt, update, 0); } int main(int argc, char** argv) { // create default params set // parse from user input if any assert(init(argc, argv, sim_params) == 0); // initialize OpenGL DBG_MSG("Initializing GLUT graphics"); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowSize(400, 400); glutCreateWindow("Nbody Simulation"); DBG_MSG("Initializing CUDA context."); cuda_check(cudaSetDevice(0)); cuda_check(cudaGLSetGLDevice(0)); cudaDeviceSynchronize(); // Set handler functions for drawing, keypresses, and window resizes glutDisplayFunc(draw_scene); glutKeyboardFunc(handle_keypress); glutSpecialFunc(keyboard_navigator); glutReshapeFunc(handle_resize); glewInit(); Simulator* simulator; // init the simulator on device init_simulator << <1, 1>>> (simulator, sim_params, time(NULL)); cuda_check(cudaDeviceSynchronize()); DBG_MSG("Simulator initializaiton completed"); // init renderer and assigne simulator to it! DBG_MSG("Initializing renderer"); renderer = new Renderer(sim_params, simulator); renderer->init_graphics(); init_simulation_data << <1, 1>>> (simulator); cuda_check(cudaDeviceSynchronize()); DBG_MSG("Starting glut loop"); glutMainLoop(); free_simulator << <1, 1>>> (simulator); cudaDeviceReset(); return 0; }
572c47db644dc5227081882f7c66b16bf2047f40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgetf2.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define PRECISION_c #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column #define cswap_bs 64 //#if (GPUSHMEM < 200) #define cgeru_bs 512 // 512 is max threads for 1.x cards //#else //#define cgeru_bs 1024 //#endif void magma_cswap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx); void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda); /** CGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] A COMPLEX array, dimension (LDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_cgesv_aux ********************************************************************/ extern "C" magma_int_t magma_cgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda, magma_int_t *ipiv, magma_int_t* info ) { *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > cgeru_bs) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for( j=0; j < min_mn; j++ ) { hipDeviceSetCacheConfig( hipFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_icamax(m-j, A(j,j), 1); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of A since it is on GPU //if ( A(jp, j) != 0.0) { hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_cswap(n, A, j, jp, lda); } // Compute elements J+1:M of J-th column. 
if (j < m) { magma_cscal_cgeru(m-j, n-j, A(j, j), lda); } //} //else if (*info == 0) { // *info = j; //} } return *info; } __global__ void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx) { int id = blockIdx.x * cswap_bs + threadIdx.x; if (id < n) { magmaFloatComplex tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx) { /* cswap two row vectors: ith and jth */ dim3 threads(cswap_bs, 1, 1); int num_blocks = (n - 1)/cswap_bs + 1; dim3 grid(num_blocks,1); hipLaunchKernelGGL(( kernel_cswap), dim3(grid), dim3(threads), 0, magma_stream , n, x, i, j, incx); } // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaFloatComplex shared_data[]; __global__ void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda) { magmaFloatComplex *shared_y = shared_data; int tid = blockIdx.x * cgeru_bs + threadIdx.x; magmaFloatComplex reg = MAGMA_C_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for(int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg; } } } void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda) { /* Specialized kernel which merged cscal and cgeru the two kernels 1) cscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads(cgeru_bs, 1, 1); int num_blocks = (m - 1)/cgeru_bs + 1; dim3 grid(num_blocks,1); size_t shared_size = sizeof(magmaFloatComplex)*(n); hipLaunchKernelGGL(( kernel_cscal_cgeru), dim3(grid), dim3(threads), shared_size, magma_stream, m, n, A, lda); }
572c47db644dc5227081882f7c66b16bf2047f40.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgetf2.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #define PRECISION_c #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column #define cswap_bs 64 //#if (GPUSHMEM < 200) #define cgeru_bs 512 // 512 is max threads for 1.x cards //#else //#define cgeru_bs 1024 //#endif void magma_cswap( magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx); void magma_cscal_cgeru( magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda); /** CGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] A COMPLEX array, dimension (LDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_cgesv_aux ********************************************************************/ extern "C" magma_int_t magma_cgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda, magma_int_t *ipiv, magma_int_t* info ) { *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > cgeru_bs) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for( j=0; j < min_mn; j++ ) { cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_icamax(m-j, A(j,j), 1); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of A since it is on GPU //if ( A(jp, j) != 0.0) { cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_cswap(n, A, j, jp, lda); } // Compute elements J+1:M of J-th column. 
if (j < m) { magma_cscal_cgeru(m-j, n-j, A(j, j), lda); } //} //else if (*info == 0) { // *info = j; //} } return *info; } __global__ void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx) { int id = blockIdx.x * cswap_bs + threadIdx.x; if (id < n) { magmaFloatComplex tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx) { /* cswap two row vectors: ith and jth */ dim3 threads(cswap_bs, 1, 1); int num_blocks = (n - 1)/cswap_bs + 1; dim3 grid(num_blocks,1); kernel_cswap<<< grid, threads, 0, magma_stream >>>(n, x, i, j, incx); } // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaFloatComplex shared_data[]; __global__ void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda) { magmaFloatComplex *shared_y = shared_data; int tid = blockIdx.x * cgeru_bs + threadIdx.x; magmaFloatComplex reg = MAGMA_C_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for(int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg; } } } void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda) { /* Specialized kernel which merged cscal and cgeru the two kernels 1) cscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads(cgeru_bs, 1, 1); int num_blocks = (m - 1)/cgeru_bs + 1; dim3 grid(num_blocks,1); size_t shared_size = sizeof(magmaFloatComplex)*(n); kernel_cscal_cgeru<<< grid, threads, shared_size, magma_stream>>>(m, n, A, lda); }
9b9fc84eaf41413561314d5772557a4f8e57f862.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgeaxpy.cu normal z -> d, Fri Sep 11 18:29:42 2015 */ #include "common_magma.h" #include "common_magmasparse.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void dgeaxpy_kernel( int num_rows, int num_cols, double alpha, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ int idx = row + j*num_rows; dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ]; } } } /** Purpose ------- This routine computes Y = alpha * X + beta * Y on the GPU. The input format is a dense matrix (vector block) stored in magma_d_matrix format. Arguments --------- @param[in] alpha double scalar multiplier. @param[in] X magma_d_matrix input/output matrix Y. @param[in] beta double scalar multiplier. @param[in,out] Y magma_d_matrix* input matrix X. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgeaxpy( double alpha, magma_d_matrix X, double beta, magma_d_matrix *Y, magma_queue_t queue ) { int m = X.num_rows; int n = X.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue , m, n, alpha, X.dval, beta, Y->dval ); return MAGMA_SUCCESS; }
9b9fc84eaf41413561314d5772557a4f8e57f862.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgeaxpy.cu normal z -> d, Fri Sep 11 18:29:42 2015 */ #include "common_magma.h" #include "common_magmasparse.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void dgeaxpy_kernel( int num_rows, int num_cols, double alpha, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ int idx = row + j*num_rows; dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ]; } } } /** Purpose ------- This routine computes Y = alpha * X + beta * Y on the GPU. The input format is a dense matrix (vector block) stored in magma_d_matrix format. Arguments --------- @param[in] alpha double scalar multiplier. @param[in] X magma_d_matrix input/output matrix Y. @param[in] beta double scalar multiplier. @param[in,out] Y magma_d_matrix* input matrix X. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgeaxpy( double alpha, magma_d_matrix X, double beta, magma_d_matrix *Y, magma_queue_t queue ) { int m = X.num_rows; int n = X.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; dgeaxpy_kernel<<< grid, threads, 0, queue >>> ( m, n, alpha, X.dval, beta, Y->dval ); return MAGMA_SUCCESS; }
069ba8cef46df0f4c5d4d35e28c3baf1b363b571.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //nvcc -ptx "E:\ 7\\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" //nvcc -ptx "E:\ 7\\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -gencode arch=compute_35,code=sm_35 -rdc=true __device__ float getneighbor(float3 *A, const unsigned int N,const unsigned int M, const unsigned int x, const unsigned int y){ if (A[x+y*M].z == 0.0) { return A[x+y*M].x; } return 0.0;//A[x+y*M].x; } __global__ void kernel(float *U, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; } __global__ void kernelAndSetIR(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernelAndSetIRAndSource(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const int x_s,const int y_s,float* F,const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; if (x==x_s && y==y_s) val+=F[t-1]; // Source val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? 
//float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernel2(float *U, float *U1, const unsigned int N,const unsigned int M,const float v,const float d_x,const float d_t) { float3 *A=(float3*) U; float3 *A1=(float3*) U1; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) { A1[x+y*M] = A[x+y*M]; return;} //A[i].x = i; float pos = A[x+y*M].x; float vel = A[x+y*M].y; float wall = A[x+y*M].z; if (wall == 0.0){ float m = A[x+1+y*M].x;//getneighbor(A,N,M,x+1,y); m += A[x-1+y*M].x;//getneighbor(A,N,M,x-1,y); m += A[x+y*M+M].x;//getneighbor(A,N,M,x,y+1); m += A[x+y*M-M].x;//getneighbor(A,N,M,x,y-1); m *= .25; vel = 1.5*(1.0-wall)*(m-pos)+vel; pos=pos+vel; } else {pos = 0.0;vel = 0.0;} A1[x+y*M].x = pos;//+0.1; A1[x+y*M].y = vel; A1[x+y*M].z = wall; } // __global__ void kernel2(float *U, const unsigned int N,const unsigned int M,int t,const float v,const float d_x,const float d_t) // { // float3 *A1=(float3*) U; // float3 *A2=(float3*) (U+1); // float3 *A3=(float3*) (U+2); // float3 *wall=(float3*) (U+3); // float3* A[3] = {A1,A2,A3}; // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i >= N*M) return; // unsigned int x = i % M; // unsigned int y = i / M; // if (x+1>=M || y+1>=N || x<1 || y<1) {return;} // // //for (t:=0;t<1000;++t){ // float val; // if (wall[x+y*M].x == 0.0){ // val = A[t%3][x-1+y*M].x-4*A[t%3][x+y*M].x+A[t%3][x+1+y*M].x+A[t%3][x+y*M+M].x+A[t%3][x+y*M-M].x; // val*=v*v*d_t*d_t/(d_x*d_x); // val += 2*A[t%3][x+y*M].x-A[(t-1)%3][x+y*M].x; // } else {val = 0.0;} // A[(t+1)%3][x+y*M].x=val; // // }
069ba8cef46df0f4c5d4d35e28c3baf1b363b571.cu
//nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" //nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -gencode arch=compute_35,code=sm_35 -rdc=true __device__ float getneighbor(float3 *A, const unsigned int N,const unsigned int M, const unsigned int x, const unsigned int y){ if (A[x+y*M].z == 0.0) { return A[x+y*M].x; } return 0.0;//A[x+y*M].x; } __global__ void kernel(float *U, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; } __global__ void kernelAndSetIR(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernelAndSetIRAndSource(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const int x_s,const int y_s,float* F,const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; if (x==x_s && y==y_s) val+=F[t-1]; // Source val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? 
//float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernel2(float *U, float *U1, const unsigned int N,const unsigned int M,const float v,const float d_x,const float d_t) { float3 *A=(float3*) U; float3 *A1=(float3*) U1; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) { A1[x+y*M] = A[x+y*M]; return;} //A[i].x = i; float pos = A[x+y*M].x; float vel = A[x+y*M].y; float wall = A[x+y*M].z; if (wall == 0.0){ float m = A[x+1+y*M].x;//getneighbor(A,N,M,x+1,y); m += A[x-1+y*M].x;//getneighbor(A,N,M,x-1,y); m += A[x+y*M+M].x;//getneighbor(A,N,M,x,y+1); m += A[x+y*M-M].x;//getneighbor(A,N,M,x,y-1); m *= .25; vel = 1.5*(1.0-wall)*(m-pos)+vel; pos=pos+vel; } else {pos = 0.0;vel = 0.0;} A1[x+y*M].x = pos;//+0.1; A1[x+y*M].y = vel; A1[x+y*M].z = wall; } // __global__ void kernel2(float *U, const unsigned int N,const unsigned int M,int t,const float v,const float d_x,const float d_t) // { // float3 *A1=(float3*) U; // float3 *A2=(float3*) (U+1); // float3 *A3=(float3*) (U+2); // float3 *wall=(float3*) (U+3); // float3* A[3] = {A1,A2,A3}; // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i >= N*M) return; // unsigned int x = i % M; // unsigned int y = i / M; // if (x+1>=M || y+1>=N || x<1 || y<1) {return;} // // //for (t:=0;t<1000;++t){ // float val; // if (wall[x+y*M].x == 0.0){ // val = A[t%3][x-1+y*M].x-4*A[t%3][x+y*M].x+A[t%3][x+1+y*M].x+A[t%3][x+y*M+M].x+A[t%3][x+y*M-M].x; // val*=v*v*d_t*d_t/(d_x*d_x); // val += 2*A[t%3][x+y*M].x-A[(t-1)%3][x+y*M].x; // } else {val = 0.0;} // A[(t+1)%3][x+y*M].x=val; // // }
0a6974415821fb00353d106e0ce50efbc7e03bb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void primal(float *y1, float *y2, float *xbar, float sigma, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; float x1, x2, val, norm; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; val = xbar[i]; x1 = (x+1<w) ? (xbar[(x+1) + w * y + w * h * z] - val) : 0.f; x2 = (y+1<h) ? (xbar[x + w * (y+1) + w * h * z] - val) : 0.f; x1 = y1[i] + sigma * x1; x2 = y2[i] + sigma * x2; norm = sqrtf(x1*x1+x2*x2); y1[i] = x1 / fmax(1.f, norm); y2[i] = x2 / fmax(1.f, norm); } } }
0a6974415821fb00353d106e0ce50efbc7e03bb8.cu
#include "includes.h" __global__ void primal(float *y1, float *y2, float *xbar, float sigma, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; float x1, x2, val, norm; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; val = xbar[i]; x1 = (x+1<w) ? (xbar[(x+1) + w * y + w * h * z] - val) : 0.f; x2 = (y+1<h) ? (xbar[x + w * (y+1) + w * h * z] - val) : 0.f; x1 = y1[i] + sigma * x1; x2 = y2[i] + sigma * x2; norm = sqrtf(x1*x1+x2*x2); y1[i] = x1 / fmax(1.f, norm); y2[i] = x2 / fmax(1.f, norm); } } }
d464a5963624117dde365d7e732d10b00b8c2b97.hip
// !!! This is a file automatically generated by hipify!!! #include "./c_runtime_api.h" #include <cassert> #include <cstdio> #include <hip/hip_runtime.h> /* TODO: Your code here */ /* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */ // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) { // Dynamic shared memory, size provided at kernel launch. extern __shared__ float loss_per_row[]; // Two dimensional thread blocks. int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } input_a += y * ncol; input_b += y * ncol; float maxval = *input_a; // Find max for a row. for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_a[x]); } // Deduct by max for a row, and raise to exp. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_a[x] - maxval); } // Compute per-row loss. float loss = 0; for (int x = 0; x < ncol; ++x) { loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum); } loss_per_row[y] = loss; __syncthreads(); // Compute reduce_mean across rows. float mean_loss = 0; // Use a single thread to reduce mean across rows. if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i = 0; i < nrow; ++i) { mean_loss += loss_per_row[i]; } mean_loss /= nrow; output[0] = mean_loss; } } int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */ return 0; } int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { /* TODO: Your code here */ // Hint: DO NOT use cublas return 0; } int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a, const DLArrayHandle input_b, DLArrayHandle output) { assert(input_a->ndim == 2); assert(input_b->ndim == 2); assert(output->ndim == 1); assert(input_a->shape[0] == input_b->shape[0] && input_a->shape[1] == input_b->shape[1]); int nrow = input_a->shape[0]; // Maximum x- or y-dimension of a block = 1024 // But we need 'nrow' shared memory, and max shared memory is 48KB. // Conservatively allow max 16KB shared memory. 
assert(nrow <= 1024 * 4); int ncol = input_a->shape[1]; const float *input_data_a = (const float *)input_a->data; const float *input_data_b = (const float *)input_b->data; float *output_data = (float *)output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // 1 block, each block with 'threads' number of threads with 'nrow' shared // memory size hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(1), dim3(threads), nrow * sizeof(float), 0, nrow, ncol, input_data_a, input_data_b, output_data); return 0; }
d464a5963624117dde365d7e732d10b00b8c2b97.cu
#include "./c_runtime_api.h" #include <cassert> #include <cstdio> #include <cuda_runtime.h> /* TODO: Your code here */ /* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */ // y = inputs[0], y_ = inputs[1] // np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True) __global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) { // Dynamic shared memory, size provided at kernel launch. extern __shared__ float loss_per_row[]; // Two dimensional thread blocks. int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; if (y >= nrow) { return; } input_a += y * ncol; input_b += y * ncol; float maxval = *input_a; // Find max for a row. for (int x = 1; x < ncol; ++x) { maxval = max(maxval, input_a[x]); } // Deduct by max for a row, and raise to exp. float sum = 0; for (int x = 0; x < ncol; ++x) { sum += exp(input_a[x] - maxval); } // Compute per-row loss. float loss = 0; for (int x = 0; x < ncol; ++x) { loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum); } loss_per_row[y] = loss; __syncthreads(); // Compute reduce_mean across rows. float mean_loss = 0; // Use a single thread to reduce mean across rows. if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i = 0; i < nrow; ++i) { mean_loss += loss_per_row[i]; } mean_loss /= nrow; output[0] = mean_loss; } } int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */ return 0; } int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA, const DLArrayHandle matB, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC) { /* TODO: Your code here */ // Hint: DO NOT use cublas return 0; } int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) { /* TODO: Your code here */ return 0; } int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a, const DLArrayHandle input_b, DLArrayHandle output) { assert(input_a->ndim == 2); assert(input_b->ndim == 2); assert(output->ndim == 1); assert(input_a->shape[0] == input_b->shape[0] && input_a->shape[1] == input_b->shape[1]); int nrow = input_a->shape[0]; // Maximum x- or y-dimension of a block = 1024 // But we need 'nrow' shared memory, and max shared memory is 48KB. // Conservatively allow max 16KB shared memory. 
assert(nrow <= 1024 * 4); int ncol = input_a->shape[1]; const float *input_data_a = (const float *)input_a->data; const float *input_data_b = (const float *)input_b->data; float *output_data = (float *)output->data; dim3 threads; if (nrow <= 1024) { threads.x = nrow; } else { threads.x = 1024; threads.y = (nrow + 1023) / 1024; } // 1 block, each block with 'threads' number of threads with 'nrow' shared // memory size matrix_softmax_cross_entropy_kernel<<<1, threads, nrow * sizeof(float)>>>( nrow, ncol, input_data_a, input_data_b, output_data); return 0; }
240d1036a564ff6321fba649a09b7a8feda9ba20.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by search_single_cta_00_generate.py * * Make changes there and run in this directory: * * > python search_single_cta_00_generate.py * */ #include <raft/neighbors/detail/cagra/search_single_cta_kernel-inl.cuh> namespace raft::neighbors::cagra::detail::single_cta_search { #define instantiate_single_cta_select_and_run( \ TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \ template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \ raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \ raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \ INDEX_T* const topk_indices_ptr, \ DISTANCE_T* const topk_distances_ptr, \ const DATA_T* const queries_ptr, \ const uint32_t num_queries, \ const INDEX_T* dev_seed_ptr, \ uint32_t* const num_executed_iterations, \ uint32_t topk, \ uint32_t num_itopk_candidates, \ uint32_t block_size, \ uint32_t smem_size, \ int64_t hash_bitlen, \ INDEX_T* hashmap_ptr, \ size_t small_hash_bitlen, \ size_t small_hash_reset_interval, \ uint32_t num_random_samplings, \ uint64_t rand_xor_mask, \ uint32_t num_seeds, \ size_t itopk_size, \ size_t search_width, \ size_t min_iterations, \ size_t max_iterations, \ hipStream_t stream); instantiate_single_cta_select_and_run(16, 256, float, uint32_t, float); #undef instantiate_single_cta_search_kernel } // namespace raft::neighbors::cagra::detail::single_cta_search
240d1036a564ff6321fba649a09b7a8feda9ba20.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by search_single_cta_00_generate.py * * Make changes there and run in this directory: * * > python search_single_cta_00_generate.py * */ #include <raft/neighbors/detail/cagra/search_single_cta_kernel-inl.cuh> namespace raft::neighbors::cagra::detail::single_cta_search { #define instantiate_single_cta_select_and_run( \ TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \ template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \ raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \ raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \ INDEX_T* const topk_indices_ptr, \ DISTANCE_T* const topk_distances_ptr, \ const DATA_T* const queries_ptr, \ const uint32_t num_queries, \ const INDEX_T* dev_seed_ptr, \ uint32_t* const num_executed_iterations, \ uint32_t topk, \ uint32_t num_itopk_candidates, \ uint32_t block_size, \ uint32_t smem_size, \ int64_t hash_bitlen, \ INDEX_T* hashmap_ptr, \ size_t small_hash_bitlen, \ size_t small_hash_reset_interval, \ uint32_t num_random_samplings, \ uint64_t rand_xor_mask, \ uint32_t num_seeds, \ size_t itopk_size, \ size_t search_width, \ size_t min_iterations, \ size_t max_iterations, \ cudaStream_t stream); instantiate_single_cta_select_and_run(16, 256, float, uint32_t, float); #undef instantiate_single_cta_search_kernel } // namespace raft::neighbors::cagra::detail::single_cta_search
9a0b313cd0136b89dccf45df7cf5829b38fffd4b.hip
// !!! This is a file automatically generated by hipify!!! // CUDA Device Query #include <stdio.h> // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); } return 0; }
9a0b313cd0136b89dccf45df7cf5829b38fffd4b.cu
// CUDA Device Query #include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); } return 0; }
f2e3f89b0b5b095a0c1504f7f6cb5f1390a2eaad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <iostream> template<class Int> __host__ __device__ static void inv_lift_orig(Int* p, unsigned int s) { Int x, y, z, w; x = *p; p += s; y = *p; p += s; z = *p; p += s; w = *p; p += s; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p -= s; *p = w; p -= s; *p = z; p -= s; *p = y; p -= s; *p = x; } template<class Int> __host__ __device__ static void inv_lift_fix2(Int* p, unsigned int s) { volatile Int x, y, z, w; x = *p; p += s; y = *p; p += s; z = *p; p += s; w = *p; p += s; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p -= s; *p = w; p -= s; *p = z; p -= s; *p = y; p -= s; *p = x; } template<class Int> __host__ __device__ static void inv_lift_fix1(Int* p, unsigned int s) { Int x, y, z, w; x = *p; y = p[s * 1]; z = p[s * 2]; w = p[s * 3]; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p[s * 3] -= s; p[s * 2] -= s; p[s] -= s; p[0] -= s; } template<class Int> __global__ void gpuTest ( Int *iblock ) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int z = threadIdx.z + blockDim.z*blockIdx.z; int idx = z*gridDim.x*blockDim.x*gridDim.y*blockDim.y + y*gridDim.x*blockDim.x + x; inv_lift_orig(iblock + idx * 64, 16); } template<class Int> void cpuTest(std::vector<Int> &h_c) { int i = 0; //for (i = 0; i < h_c.size(); i++){ inv_lift_orig(thrust::raw_pointer_cast(h_c.data()) + i * 64, 16); //} } typedef long long Int; int main() { const int nx = 256; const int ny = 256; const int nz = 256; const int arraySize = nx*ny*nz; const int thread_cnt = arraySize / 64; thrust::host_vector<Int> h_cout, h_a; h_cout.resize(arraySize); h_a.resize(arraySize); for (int i = 0; i < arraySize; i++){ h_a[i] = i; } thrust::device_vector<Int> d_a, d_c; d_a.resize(arraySize); d_c.resize(arraySize); d_a = h_a; d_c = h_a; gpuTest<Int> << <1,1 >> >(thrust::raw_pointer_cast(d_c.data())); //dim3 emax_size(nx / 4, ny / 4, nz / 4); //dim3 block_size(8, 8, 8); //dim3 grid_size = emax_size; //grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //cudaInvXForm<Int> << <block_size, grid_size >> >(thrust::raw_pointer_cast(d_c.data())); //hipStreamSynchronize(0); hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "bitshiftKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); exit(1); } h_cout = d_c; std::vector<Int> h_c; h_c.resize(arraySize); thrust::copy(h_a.begin(), h_a.end(), h_c.begin()); cpuTest<Int>(h_c); for (int i = 0; i < h_c.size(); i++){ if (h_c[i] != h_cout[i]){ std::cout << "Borked (values should be the same): at index " << i << " cpu value: " << h_c[i] << " gpu value: " << h_cout[i] << std::endl; exit(1); } } std::cout << "Finished correctly." << std::endl; return 0; }
f2e3f89b0b5b095a0c1504f7f6cb5f1390a2eaad.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <iostream> template<class Int> __host__ __device__ static void inv_lift_orig(Int* p, unsigned int s) { Int x, y, z, w; x = *p; p += s; y = *p; p += s; z = *p; p += s; w = *p; p += s; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p -= s; *p = w; p -= s; *p = z; p -= s; *p = y; p -= s; *p = x; } template<class Int> __host__ __device__ static void inv_lift_fix2(Int* p, unsigned int s) { volatile Int x, y, z, w; x = *p; p += s; y = *p; p += s; z = *p; p += s; w = *p; p += s; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p -= s; *p = w; p -= s; *p = z; p -= s; *p = y; p -= s; *p = x; } template<class Int> __host__ __device__ static void inv_lift_fix1(Int* p, unsigned int s) { Int x, y, z, w; x = *p; y = p[s * 1]; z = p[s * 2]; w = p[s * 3]; y += w >> 1; w -= y >> 1; y += w; w <<= 1; w -= y; z += x; x <<= 1; x -= z; y += z; z <<= 1; z -= y; w += x; x <<= 1; x -= w; p[s * 3] -= s; p[s * 2] -= s; p[s] -= s; p[0] -= s; } template<class Int> __global__ void gpuTest ( Int *iblock ) { int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int z = threadIdx.z + blockDim.z*blockIdx.z; int idx = z*gridDim.x*blockDim.x*gridDim.y*blockDim.y + y*gridDim.x*blockDim.x + x; inv_lift_orig(iblock + idx * 64, 16); } template<class Int> void cpuTest(std::vector<Int> &h_c) { int i = 0; //for (i = 0; i < h_c.size(); i++){ inv_lift_orig(thrust::raw_pointer_cast(h_c.data()) + i * 64, 16); //} } typedef long long Int; int main() { const int nx = 256; const int ny = 256; const int nz = 256; const int arraySize = nx*ny*nz; const int thread_cnt = arraySize / 64; thrust::host_vector<Int> h_cout, h_a; h_cout.resize(arraySize); h_a.resize(arraySize); for (int i = 0; i < arraySize; i++){ h_a[i] = i; } thrust::device_vector<Int> d_a, d_c; d_a.resize(arraySize); d_c.resize(arraySize); d_a = h_a; d_c = h_a; gpuTest<Int> << <1,1 >> >(thrust::raw_pointer_cast(d_c.data())); //dim3 emax_size(nx / 4, ny / 4, nz / 4); //dim3 block_size(8, 8, 8); //dim3 grid_size = emax_size; //grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //cudaInvXForm<Int> << <block_size, grid_size >> >(thrust::raw_pointer_cast(d_c.data())); //cudaStreamSynchronize(0); cudaError_t cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "bitshiftKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); exit(1); } h_cout = d_c; std::vector<Int> h_c; h_c.resize(arraySize); thrust::copy(h_a.begin(), h_a.end(), h_c.begin()); cpuTest<Int>(h_c); for (int i = 0; i < h_c.size(); i++){ if (h_c[i] != h_cout[i]){ std::cout << "Borked (values should be the same): at index " << i << " cpu value: " << h_c[i] << " gpu value: " << h_cout[i] << std::endl; exit(1); } } std::cout << "Finished correctly." << std::endl; return 0; }
f42e5b4f0791cc21ebd9c7f56797319b01afdf47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ static int ros_Integrator_rodas3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO, const int offset ) { int index = blockIdx.x*blockDim.x+threadIdx.x+offset; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; const int ros_S = 4; /* Not sure if it worth it for shared */ double ros_A[15]; double ros_C[15]; int ros_NewF[8]; double ros_M[6]; double ros_E[6]; double ros_Alpha[6]; double ros_Gamma[6]; ros_A[0] = 0.0E+00; ros_A[1] = 2.0E+00; ros_A[2] = 0.0E+00; ros_A[3] = 2.0E+00; ros_A[4] = 0.0E+00; ros_A[5] = 1.0E+00; ros_C[0] = 4.0E+00; ros_C[1] = 1.0E+00; ros_C[2] =- 1.0E+00; ros_C[3] = 1.0E+00; ros_C[4] =- 1.0E+00; ros_C[5] =- (8.0E+00/3.0E+00); ros_NewF[0] = 1; ros_NewF[1] = 0; ros_NewF[2] = 1; ros_NewF[3] = 1; ros_M[0] = 2.0E+00; ros_M[1] = 0.0E+00; ros_M[2] = 1.0E+00; ros_M[3] = 1.0E+00; ros_E[0] = 0.0E+00; ros_E[1] = 0.0E+00; ros_E[2] = 0.0E+00; ros_E[3] = 1.0E+00; ros_Alpha[0] = 0.0E+00; ros_Alpha[1] = 0.0E+00; ros_Alpha[2] = 1.0E+00; ros_Alpha[3] = 1.0E+00; ros_Gamma[0] = 0.5E+00; ros_Gamma[1] = 1.5E+00; ros_Gamma[2] = 0.0E+00; ros_Gamma[3] = 0.0E+00; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // ~~~> Time loop begins below // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, 0.5E+00, jac0, Ghimj, Nsng, Ndec, VL_GLO); // ~~~> Compute the stages // Stage: { int istage = 0; for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i) = 
Fcn0(index,i); if ((!autonomous)) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage #pragma unroll for (int istage=1; istage < ros_S; istage++) { // For the 1st istage the function has been computed previously if (istage == 0) { for (int i=0; i<NVAR; i++){ varNew(index,i) = Fcn0(index,i); // FCN0 Read } } else if(ros_NewF[istage]) { for (int i=0; i<NVAR; i++){ varNew(index,i) = var(index,i); } for (int j=0; j < (istage); j++){ for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i); } } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap } for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i); for (int j=0; j<(istage); j++) { HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H); for (int i=0; i<NVAR; i++){ double tmp = K(index,j,i); K(index,istage,i) += tmp*HC; } } if ((!autonomous) && (ros_Gamma[istage] )) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage // ~~~> Compute the new solution for (int i=0; i<NVAR; i++){ double tmpNew = var(index,i); /// VAR READ double tmpErr = ZERO; #pragma unroll for (int j=0; j<ros_S; j++){ double tmp = K(index,j,i); tmpNew += tmp*ros_M[j]; tmpErr += tmp*ros_E[j]; } varNew(index,i) = tmpNew; // varNew is killed varErr(index,i) = tmpErr; } Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } __global__ void Rosenbrock_rodas3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, const double * __restrict__ absTol, const double * __restrict__ relTol, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO, const int 
offset) { int index = blockIdx.x*blockDim.x+threadIdx.x+offset; /* * In theory someone can aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*3]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; const int method = 4; /* FIXME: add check for method */ const double *ros_A = &ros[method-1].ros_A[0]; const double *ros_C = &ros[method-1].ros_C[0]; const double *ros_M = &ros[method-1].ros_M[0]; const double *ros_E = &ros[method-1].ros_E[0]; const double *ros_Alpha = &ros[method-1].ros_Alpha[0]; const double *ros_Gamma = &ros[method-1].ros_Gamma[0]; const int *ros_NewF = &ros[method-1].ros_NewF[0]; const int ros_S = ros[method-1].ros_S; const double ros_ELO = ros[method-1].ros_ELO; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. * */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO, offset); ros_Integrator_rodas3(var, fix, Tstart, Tend, Texit, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO, offset ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } }
f42e5b4f0791cc21ebd9c7f56797319b01afdf47.cu
__device__ static int ros_Integrator_rodas3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO, const int offset ) { int index = blockIdx.x*blockDim.x+threadIdx.x+offset; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; const int ros_S = 4; /* Not sure if it worth it for shared */ double ros_A[15]; double ros_C[15]; int ros_NewF[8]; double ros_M[6]; double ros_E[6]; double ros_Alpha[6]; double ros_Gamma[6]; ros_A[0] = 0.0E+00; ros_A[1] = 2.0E+00; ros_A[2] = 0.0E+00; ros_A[3] = 2.0E+00; ros_A[4] = 0.0E+00; ros_A[5] = 1.0E+00; ros_C[0] = 4.0E+00; ros_C[1] = 1.0E+00; ros_C[2] =- 1.0E+00; ros_C[3] = 1.0E+00; ros_C[4] =- 1.0E+00; ros_C[5] =- (8.0E+00/3.0E+00); ros_NewF[0] = 1; ros_NewF[1] = 0; ros_NewF[2] = 1; ros_NewF[3] = 1; ros_M[0] = 2.0E+00; ros_M[1] = 0.0E+00; ros_M[2] = 1.0E+00; ros_M[3] = 1.0E+00; ros_E[0] = 0.0E+00; ros_E[1] = 0.0E+00; ros_E[2] = 0.0E+00; ros_E[3] = 1.0E+00; ros_Alpha[0] = 0.0E+00; ros_Alpha[1] = 0.0E+00; ros_Alpha[2] = 1.0E+00; ros_Alpha[3] = 1.0E+00; ros_Gamma[0] = 0.5E+00; ros_Gamma[1] = 1.5E+00; ros_Gamma[2] = 0.0E+00; ros_Gamma[3] = 0.0E+00; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // ~~~> Time loop begins below // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, 0.5E+00, jac0, Ghimj, Nsng, Ndec, VL_GLO); // ~~~> Compute the stages // Stage: { int istage = 0; for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i) = Fcn0(index,i); if ((!autonomous)) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; 
i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage #pragma unroll for (int istage=1; istage < ros_S; istage++) { // For the 1st istage the function has been computed previously if (istage == 0) { for (int i=0; i<NVAR; i++){ varNew(index,i) = Fcn0(index,i); // FCN0 Read } } else if(ros_NewF[istage]) { for (int i=0; i<NVAR; i++){ varNew(index,i) = var(index,i); } for (int j=0; j < (istage); j++){ for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i); } } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap } for (int i=0; i<NVAR; i++) K(index,istage,i) = varNew(index,i); for (int j=0; j<(istage); j++) { HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H); for (int i=0; i<NVAR; i++){ double tmp = K(index,j,i); K(index,istage,i) += tmp*HC; } } if ((!autonomous) && (ros_Gamma[istage] )) { HG = direction*H*ros_Gamma[istage]; for (int i=0; i<NVAR; i++){ K(index,istage,i) += dFdT(index,i)*HG; } } // R ,RW, RW, R, R ros_Solve(Ghimj, K, Nsol, istage, ros_S); } // Stage // ~~~> Compute the new solution for (int i=0; i<NVAR; i++){ double tmpNew = var(index,i); /// VAR READ double tmpErr = ZERO; #pragma unroll for (int j=0; j<ros_S; j++){ double tmp = K(index,j,i); tmpNew += tmp*ros_M[j]; tmpErr += tmp*ros_E[j]; } varNew(index,i) = tmpNew; // varNew is killed varErr(index,i) = tmpErr; } Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } __global__ void Rosenbrock_rodas3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, const double * __restrict__ absTol, const double * __restrict__ relTol, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO, const int offset) { int index = blockIdx.x*blockDim.x+threadIdx.x+offset; /* * In theory someone can 
aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*3]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; const int method = 4; /* FIXME: add check for method */ const double *ros_A = &ros[method-1].ros_A[0]; const double *ros_C = &ros[method-1].ros_C[0]; const double *ros_M = &ros[method-1].ros_M[0]; const double *ros_E = &ros[method-1].ros_E[0]; const double *ros_Alpha = &ros[method-1].ros_Alpha[0]; const double *ros_Gamma = &ros[method-1].ros_Gamma[0]; const int *ros_NewF = &ros[method-1].ros_NewF[0]; const int ros_S = ros[method-1].ros_S; const double ros_ELO = ros[method-1].ros_ELO; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. * */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO, offset); ros_Integrator_rodas3(var, fix, Tstart, Tend, Texit, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO, offset ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } }
d39fb14c443fa4940d790e5f80093d33e822e976.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrix_sinh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; hipMalloc(&A, XSIZE*YSIZE); double *C = NULL; hipMalloc(&C, XSIZE*YSIZE); unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrix_sinh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrix_sinh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrix_sinh), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d39fb14c443fa4940d790e5f80093d33e822e976.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrix_sinh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); double *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrix_sinh<<<gridBlock,threadBlock>>>(A,C,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrix_sinh<<<gridBlock,threadBlock>>>(A,C,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrix_sinh<<<gridBlock,threadBlock>>>(A,C,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
506e839c6b610acb600e1d3136deac3276708595.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "bilateralOptimizedGpuColsKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *inputImage = NULL; hipMalloc(&inputImage, XSIZE*YSIZE); float *outputImage = NULL; hipMalloc(&outputImage, XSIZE*YSIZE); int rows = XSIZE; int cols = YSIZE; uint32_t window = 1; float sigmaD = 1; float sigmaR = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( bilateralOptimizedGpuColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( bilateralOptimizedGpuColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( bilateralOptimizedGpuColsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
506e839c6b610acb600e1d3136deac3276708595.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bilateralOptimizedGpuColsKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *inputImage = NULL; cudaMalloc(&inputImage, XSIZE*YSIZE); float *outputImage = NULL; cudaMalloc(&outputImage, XSIZE*YSIZE); int rows = XSIZE; int cols = YSIZE; uint32_t window = 1; float sigmaD = 1; float sigmaR = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bilateralOptimizedGpuColsKernel<<<gridBlock,threadBlock>>>(inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bilateralOptimizedGpuColsKernel<<<gridBlock,threadBlock>>>(inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bilateralOptimizedGpuColsKernel<<<gridBlock,threadBlock>>>(inputImage,outputImage,rows,cols,window,sigmaD,sigmaR); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6e47727edfbcf6e9843215a258110ad22c4bf4b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <vector> // #define CUDA_ERROR_CHECK // enable error check in cuda #include "/home/wxie/AI/Spike/July_2019/Spike_July_2019/Spike/Backend/CUDA/Helpers/ErrorCheck.hpp" using namespace std; // Kernel function to do nested loops __global__ void find_max(int max_x, int max_y, float *tot, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if(i < max_x && j<max_y) { if(*tot < x[i]) atomicExch(tot, x[i]); } } int main(void) { int Nx = 1<<10; int Ny = 1<<10; float *d_x = NULL, *d_y = NULL; float *d_tot = NULL; CudaSafeCall(hipMalloc((void **)&d_x, sizeof(float)*Nx)); CudaSafeCall(hipMalloc((void **)&d_y, sizeof(float)*Ny)); CudaSafeCall(hipMalloc((void **)&d_tot, sizeof(float))); // Allocate Unified Memory accessible from CPU or GPU vector<float> vx; vector<float> vy; // initialize x and y arrays on the host for (int i = 0; i < Nx; i++) vx.push_back(i); for (int i = 0; i < Ny; i++) vy.push_back(i*10); // float tot = 0; for(int i = 0; i<vx.size(); i++) for(int j = 0; j<vy.size(); j++) tot += vx[i] + vy[j]; cout<<"CPU: tot: "<<tot<<endl; // CudaSafeCall(hipMemcpy(d_x, vx.data(), vx.size()*sizeof(float), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(d_y, vy.data(), vy.size()*sizeof(float), hipMemcpyHostToDevice)); // int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, find_max, 0, Nx+Ny); //.. bx*by can not go beyond the blockSize, or hardware limit, which is 1024; //.. bx*bx = blockSize && bx/by=Nx/Ny, solve the equation int bx = sqrt(blockSize*Nx/(float)Ny); int by = bx*Ny/(float)Nx; dim3 blockSize_3D(bx, by); dim3 gridSize_3D((Nx+bx-1)/bx, (Ny+by+1)/by); cout<<"blockSize: "<<blockSize<<endl; cout<<"bx: "<<bx<<" by: "<<by<<" gx: "<<gridSize_3D.x<<" gy: "<<gridSize_3D.y<<endl; // calculate theoretical occupancy int maxActiveBlocks; hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, find_max, blockSize, 0); int device; hipDeviceProp_t props; hipGetDevice(&device); hipGetDeviceProperties(&props, device); float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); printf("Launched blocks of size %d. Theoretical occupancy: %f\n", blockSize, occupancy); // Run kernel on 1M elements on the GPU tot = 0; hipLaunchKernelGGL(( find_max), dim3(gridSize_3D), dim3(blockSize_3D), 0, 0, Nx, Ny, d_tot, d_x, d_y); // Wait for GPU to finish before accessing on host CudaCheckError(); //.. defined in SPIKE include hipDeviceSynchronize() // CudaSafeCall(hipMemcpy(&tot, d_tot, sizeof(float), hipMemcpyDeviceToHost)); // cout<<" GPU: tot: "<<tot<<endl; // Free memory CudaSafeCall(hipFree(d_x)); CudaSafeCall(hipFree(d_y)); return 0; }
6e47727edfbcf6e9843215a258110ad22c4bf4b5.cu
#include <iostream> #include <math.h> #include <vector> // #define CUDA_ERROR_CHECK // enable error check in cuda #include "/home/wxie/AI/Spike/July_2019/Spike_July_2019/Spike/Backend/CUDA/Helpers/ErrorCheck.hpp" using namespace std; // Kernel function to do nested loops __global__ void find_max(int max_x, int max_y, float *tot, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if(i < max_x && j<max_y) { if(*tot < x[i]) atomicExch(tot, x[i]); } } int main(void) { int Nx = 1<<10; int Ny = 1<<10; float *d_x = NULL, *d_y = NULL; float *d_tot = NULL; CudaSafeCall(cudaMalloc((void **)&d_x, sizeof(float)*Nx)); CudaSafeCall(cudaMalloc((void **)&d_y, sizeof(float)*Ny)); CudaSafeCall(cudaMalloc((void **)&d_tot, sizeof(float))); // Allocate Unified Memory – accessible from CPU or GPU vector<float> vx; vector<float> vy; // initialize x and y arrays on the host for (int i = 0; i < Nx; i++) vx.push_back(i); for (int i = 0; i < Ny; i++) vy.push_back(i*10); // float tot = 0; for(int i = 0; i<vx.size(); i++) for(int j = 0; j<vy.size(); j++) tot += vx[i] + vy[j]; cout<<"CPU: tot: "<<tot<<endl; // CudaSafeCall(cudaMemcpy(d_x, vx.data(), vx.size()*sizeof(float), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(d_y, vy.data(), vy.size()*sizeof(float), cudaMemcpyHostToDevice)); // int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, find_max, 0, Nx+Ny); //.. bx*by can not go beyond the blockSize, or hardware limit, which is 1024; //.. bx*bx = blockSize && bx/by=Nx/Ny, solve the equation int bx = sqrt(blockSize*Nx/(float)Ny); int by = bx*Ny/(float)Nx; dim3 blockSize_3D(bx, by); dim3 gridSize_3D((Nx+bx-1)/bx, (Ny+by+1)/by); cout<<"blockSize: "<<blockSize<<endl; cout<<"bx: "<<bx<<" by: "<<by<<" gx: "<<gridSize_3D.x<<" gy: "<<gridSize_3D.y<<endl; // calculate theoretical occupancy int maxActiveBlocks; cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, find_max, blockSize, 0); int device; cudaDeviceProp props; cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); printf("Launched blocks of size %d. Theoretical occupancy: %f\n", blockSize, occupancy); // Run kernel on 1M elements on the GPU tot = 0; find_max<<<gridSize_3D, blockSize_3D>>>(Nx, Ny, d_tot, d_x, d_y); // Wait for GPU to finish before accessing on host CudaCheckError(); //.. defined in SPIKE include cudaDeviceSynchronize() // CudaSafeCall(cudaMemcpy(&tot, d_tot, sizeof(float), cudaMemcpyDeviceToHost)); // cout<<" GPU: tot: "<<tot<<endl; // Free memory CudaSafeCall(cudaFree(d_x)); CudaSafeCall(cudaFree(d_y)); return 0; }
506d9c13e83709327e33a654563bcac5a98a5c75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "softmax_op.h" #include "softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* logPdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logPdata[i * D + labeldata[i]] * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } __global__ void LabelCrossEntropyGradientKernelWeighted( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float* weights) { CUDA_1D_KERNEL_LOOP(i, N * D) { int row = i / D; int d = i % D; float val = Pdata[i] - 1.0 * (d == labeldata[row]); float weight = weights[row]; dXdata[i] = val * weight; } } __global__ void ProbCrossEntropyKernel( const int N, const int D, const float* Pdata, const float* labeldata, const float* weights, float* Ydata) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float weight = weights ? weights[i] : 1.0; float sum = 0.0; float total_prob = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { int idx = i * D + j; CUDA_KERNEL_ASSERT(labeldata[idx] >= 0); total_prob += labeldata[idx]; sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight; } float tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob); if (threadIdx.x == 0) { Ydata[i] = tot; // Sanity check CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f); } __syncthreads(); } } __global__ void ProbCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const float* labeldata, float* dXdata, const float* weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = Pdata[idx] - labeldata[idx]; } } else { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D]; } } } __global__ void RowMaxKernel(const int rows, const int cols, const float* data, float* out) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { float maxval = -FLT_MAX; // NB: The memory accesses here are sequentialized; without unrolling // the loop, there will not be any ILP. However, because we are running // this kernel with a lot of threads, this should not be a big problem. // However, if we reduce the number of threads to take advantage of // warp-wide // synchronization, this may become a problem again. 
for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) { maxval = max(data[rowIndex * cols + colIndex], maxval); } maxval = BlockReduce(temp_storage).Reduce(maxval, hipcub::Max()); if (threadIdx.x == 0) { out[rowIndex] = maxval; } __syncthreads(); } } __global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float *weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { CUDA_KERNEL_ASSERT(label >= 0 && label < D); float weight = (weights == NULL ? 1.0 : weights[index]); loss_data[index] = -log(max( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeLogsKernel( const int nthreads, const int D, const float* logits, const float* rowmax, const float* scales, float* out_log) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN)); } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* probs, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = probs[index] / scales[n]; } } void Softmax( const int N, const int D, const float* logits, const float* sum_multiplier, float* scales, float* rowmax, float* probs, bool log_softmax, CUDAContext* context) { const int size = N * D; hipLaunchKernelGGL(( RowMaxKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, D, logits, rowmax); // Put the intermediate result X - 
max(X) into Y context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, N, D, 1, -1, rowmax, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize if (!log_softmax) { hipLaunchKernelGGL(( SoftmaxNormalizeKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, D, probs, scales, probs); } else { hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, D, logits, rowmax, scales, probs); } } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets auto* P = Output(0); // Probabilities from softmax auto* avg_loss = Output(1); // Average loss const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; if (spatial_mode_) { N = X.dim32(0); D = X.dim32(1); } else { N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); } P->ResizeLike(X); total_weight_ptr_.Resize(1); DCHECK(!(spatial_mode_ && label_prob_mode_)); // Do not currently support both if (!spatial_mode_) { if (label_prob_mode_) { DCHECK_GE(T.ndim(), 2); DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.ndim() == canonical_axis) { DCHECK_EQ(T.size(), N); } else { DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), 1); } } avg_loss->Resize(vector<TIndex>()); if (losses_.size() != N) { losses_.Resize(N); } if (rowmax_.size() != N) { rowmax_.Resize(N); } if (sum_multiplier_.size() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), rowmax_.mutable_data<float>(), P->mutable_data<float>(), !label_prob_mode_, // logarithmic output &context_); // Compute label xent loss per example if (!label_prob_mode_) { hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); // Since we had logarithmic output, we need to exponentiate // them again. 
math::Exp<float, CUDAContext>( N * D, P->data<float>(), P->mutable_data<float>(), &context_); } else { hipLaunchKernelGGL(( ProbCrossEntropyKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P->data<float>(), T.data<float>(), weights, losses_.mutable_data<float>()); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>(N, weights, total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); } // Sum of all losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Average of input batch size if (total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } } else { DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); DCHECK_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (losses_.size() != N * W * H) { losses_.Resize(N * W * H); } if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location hipLaunchKernelGGL(( SpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, Xdata, Pdata); // Cross entropy avg_loss->Resize(vector<TIndex>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } } return true; } template<> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); auto* dX = Output(0); dX->ResizeLike(X); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; if (spatial_mode_) { N = X.dim32(0); D = X.dim32(1); } else { N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); } if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX->ShareData(P); } total_weight_ptr_.Resize(1); if (!spatial_mode_) { if (label_prob_mode_) { DCHECK_GE(T.ndim(), 2); DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.ndim() == canonical_axis) { DCHECK_EQ(T.size(), N); } else { DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), 1); } } // Subtract 1 from labeled positions if (!label_prob_mode_) { if (weights == nullptr) { // Copy softmax probabilities into dX if (!only_loss_) { context_.Copy<float, CUDAContext, CUDAContext>( P.size(), P.data<float>(), dX->mutable_data<float>()); } hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>()); } else { // Weighted version gets the Pdata values internally hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>(), weights); } } else { hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<float>(), dX->mutable_data<float>(), weights); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_); hipMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); } // Scale by d_avg_loss / N if (total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } else { // Spatial mode, compute softmax for each x, y location DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. 
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); // Somewhat awkward scalar passing from device to host float h_total_weight; hipMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream()); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / h_total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } // Implementation for the CUDA context. template <> bool SoftmaxOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* P = Output(0); const auto canonical_axis = X.canonical_axis_index(axis_); const int N = X.size_to_dim(canonical_axis); const int D = X.size_from_dim(canonical_axis); P->ResizeLike(X); if (sum_multiplier_.size() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } if (scale_.size() != N) { scale_.Resize(N); } if (rowmax_.size() != N) { rowmax_.Resize(N); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), scale_.mutable_data<float>(), rowmax_.mutable_data<float>(), P->mutable_data<float>(), false, &context_); return true; } #define SOFTMAX_NUM_THREADS 128 // The softmax gradient kernel. This kernel has to be called with the number of // threads per block being no more than SOFTMAX_NUM_THREADS. namespace { __global__ void softmax_gradient_kernel( const int dim, const float* Y, const float* dY, float* dX) { Y += blockIdx.x * dim; dY += blockIdx.x * dim; dX += blockIdx.x * dim; const int idx = threadIdx.x; __shared__ float reduction_buffer[SOFTMAX_NUM_THREADS]; float tmp; // A two-level reduction to compute the inner products. tmp = 0; for (int i = idx; i < dim; i += blockDim.x) { tmp += dY[i] * Y[i]; } reduction_buffer[idx] = tmp; __syncthreads(); if (idx == 0) { tmp = reduction_buffer[0]; for (int i = 1; i < blockDim.x; ++i) tmp += reduction_buffer[i]; reduction_buffer[0] = tmp; } __syncthreads(); // Compute gradient. 
tmp = reduction_buffer[0]; for (int i = idx; i < dim; i += blockDim.x) { dX[i] = Y[i] * (dY[i] - tmp); } } } // namespace template <> bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); auto& dY = Input(1); auto* dX = Output(0); const auto canonical_axis = Y.canonical_axis_index(axis_); const int N = Y.size_to_dim(canonical_axis); const int D = Y.size_from_dim(canonical_axis); dX->ResizeLike(Y); hipLaunchKernelGGL(( softmax_gradient_kernel), dim3(N), dim3(SOFTMAX_NUM_THREADS), 0, context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>()); return true; } namespace { REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>); } // namespace } // namespace caffe2
506d9c13e83709327e33a654563bcac5a98a5c75.cu
#include <cfloat> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "softmax_op.h" #include "softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* logPdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logPdata[i * D + labeldata[i]] * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } __global__ void LabelCrossEntropyGradientKernelWeighted( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float* weights) { CUDA_1D_KERNEL_LOOP(i, N * D) { int row = i / D; int d = i % D; float val = Pdata[i] - 1.0 * (d == labeldata[row]); float weight = weights[row]; dXdata[i] = val * weight; } } __global__ void ProbCrossEntropyKernel( const int N, const int D, const float* Pdata, const float* labeldata, const float* weights, float* Ydata) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float weight = weights ? weights[i] : 1.0; float sum = 0.0; float total_prob = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { int idx = i * D + j; CUDA_KERNEL_ASSERT(labeldata[idx] >= 0); total_prob += labeldata[idx]; sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight; } float tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob); if (threadIdx.x == 0) { Ydata[i] = tot; // Sanity check CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f); } __syncthreads(); } } __global__ void ProbCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const float* labeldata, float* dXdata, const float* weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = Pdata[idx] - labeldata[idx]; } } else { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D]; } } } __global__ void RowMaxKernel(const int rows, const int cols, const float* data, float* out) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { float maxval = -FLT_MAX; // NB: The memory accesses here are sequentialized; without unrolling // the loop, there will not be any ILP. However, because we are running // this kernel with a lot of threads, this should not be a big problem. // However, if we reduce the number of threads to take advantage of // warp-wide // synchronization, this may become a problem again. 
for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) { maxval = max(data[rowIndex * cols + colIndex], maxval); } maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max()); if (threadIdx.x == 0) { out[rowIndex] = maxval; } __syncthreads(); } } __global__ void SpatialSoftmaxKernel(const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel(const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float *weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { CUDA_KERNEL_ASSERT(label >= 0 && label < D); float weight = (weights == NULL ? 1.0 : weights[index]); loss_data[index] = -log(max( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeLogsKernel( const int nthreads, const int D, const float* logits, const float* rowmax, const float* scales, float* out_log) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN)); } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* probs, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = probs[index] / scales[n]; } } void Softmax( const int N, const int D, const float* logits, const float* sum_multiplier, float* scales, float* rowmax, float* probs, bool log_softmax, CUDAContext* context) { const int size = N * D; RowMaxKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, logits, rowmax); // Put the intermediate result X - max(X) into Y context->Copy<float, 
CUDAContext, CUDAContext>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, N, D, 1, -1, rowmax, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize if (!log_softmax) { SoftmaxNormalizeKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, D, probs, scales, probs); } else { SoftmaxNormalizeLogsKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs); } } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets auto* P = Output(0); // Probabilities from softmax auto* avg_loss = Output(1); // Average loss const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; if (spatial_mode_) { N = X.dim32(0); D = X.dim32(1); } else { N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); } P->ResizeLike(X); total_weight_ptr_.Resize(1); DCHECK(!(spatial_mode_ && label_prob_mode_)); // Do not currently support both if (!spatial_mode_) { if (label_prob_mode_) { DCHECK_GE(T.ndim(), 2); DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.ndim() == canonical_axis) { DCHECK_EQ(T.size(), N); } else { DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), 1); } } avg_loss->Resize(vector<TIndex>()); if (losses_.size() != N) { losses_.Resize(N); } if (rowmax_.size() != N) { rowmax_.Resize(N); } if (sum_multiplier_.size() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), rowmax_.mutable_data<float>(), P->mutable_data<float>(), !label_prob_mode_, // logarithmic output &context_); // Compute label xent loss per example if (!label_prob_mode_) { LabelCrossEntropyKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); // Since we had logarithmic output, we need to exponentiate // them again. 
math::Exp<float, CUDAContext>( N * D, P->data<float>(), P->mutable_data<float>(), &context_); } else { ProbCrossEntropyKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P->data<float>(), T.data<float>(), weights, losses_.mutable_data<float>()); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>(N, weights, total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); } // Sum of all losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Average of input batch size if (total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } } else { DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); DCHECK_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (losses_.size() != N * W * H) { losses_.Resize(N * W * H); } if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->mutable_data<float>(); // Softmax for each x,y location SpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, Xdata, Pdata); // Cross entropy avg_loss->Resize(vector<TIndex>()); float* avg_loss_data = avg_loss->mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialCrossEntropyLossKernel<<<CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } } return true; } template<> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); auto* dX = Output(0); dX->ResizeLike(X); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; if (spatial_mode_) { N = X.dim32(0); D = X.dim32(1); } else { N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); } if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX->ShareData(P); } total_weight_ptr_.Resize(1); if (!spatial_mode_) { if (label_prob_mode_) { DCHECK_GE(T.ndim(), 2); DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.ndim() == canonical_axis) { DCHECK_EQ(T.size(), N); } else { DCHECK_EQ(T.size_to_dim(canonical_axis), N); DCHECK_EQ(T.size_from_dim(canonical_axis), 1); } } // Subtract 1 from labeled positions if (!label_prob_mode_) { if (weights == nullptr) { // Copy softmax probabilities into dX if (!only_loss_) { context_.Copy<float, CUDAContext, CUDAContext>( P.size(), P.data<float>(), dX->mutable_data<float>()); } LabelCrossEntropyGradientKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>()); } else { // Weighted version gets the Pdata values internally LabelCrossEntropyGradientKernelWeighted<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>(), weights); } } else { ProbCrossEntropyGradientKernel<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<float>(), dX->mutable_data<float>(), weights); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_); cudaMemcpyAsync(&total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); } // Scale by d_avg_loss / N if (total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } else { // Spatial mode, compute softmax for each x, y location DCHECK_EQ(X.ndim(), 4); DCHECK_EQ(T.ndim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (weights_.size() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialSoftmaxLossGradientKernel<<<CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.size(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_); // Somewhat awkward scalar passing from device to host float h_total_weight; cudaMemcpyAsync(&h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream()); // Final scaling if (h_total_weight > 0) { math::Scale<float, CUDAContext>( dX->size(), scale_ / h_total_weight, dX->data<float>(), dX->mutable_data<float>(), &context_); } math::Scale<float, CUDAContext>( dX->size(), d_avg_loss.data<float>(), dX->data<float>(), dX->mutable_data<float>(), &context_); } return true; } // Implementation for the CUDA context. 
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* P = Output(0);
  const auto canonical_axis = X.canonical_axis_index(axis_);
  const int N = X.size_to_dim(canonical_axis);
  const int D = X.size_from_dim(canonical_axis);
  P->ResizeLike(X);
  if (sum_multiplier_.size() != D) {
    sum_multiplier_.Resize(D);
    math::Set<float, CUDAContext>(
        D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
  }
  if (scale_.size() != N) {
    scale_.Resize(N);
  }
  if (rowmax_.size() != N) {
    rowmax_.Resize(N);
  }
  Softmax(
      N,
      D,
      X.data<float>(),
      sum_multiplier_.data<float>(),
      scale_.mutable_data<float>(),
      rowmax_.mutable_data<float>(),
      P->mutable_data<float>(),
      false,
      &context_);
  return true;
}

#define SOFTMAX_NUM_THREADS 128

// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
    const int dim, const float* Y, const float* dY, float* dX) {
  Y += blockIdx.x * dim;
  dY += blockIdx.x * dim;
  dX += blockIdx.x * dim;
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
  float tmp;

  // A two-level reduction to compute the inner products.
  tmp = 0;
  for (int i = idx; i < dim; i += blockDim.x) {
    tmp += dY[i] * Y[i];
  }
  reduction_buffer[idx] = tmp;
  __syncthreads();
  if (idx == 0) {
    tmp = reduction_buffer[0];
    for (int i = 1; i < blockDim.x; ++i)
      tmp += reduction_buffer[i];
    reduction_buffer[0] = tmp;
  }
  __syncthreads();
  // Compute gradient.
  tmp = reduction_buffer[0];
  for (int i = idx; i < dim; i += blockDim.x) {
    dX[i] = Y[i] * (dY[i] - tmp);
  }
}
} // namespace

template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& Y = Input(0);
  auto& dY = Input(1);
  auto* dX = Output(0);
  const auto canonical_axis = Y.canonical_axis_index(axis_);
  const int N = Y.size_to_dim(canonical_axis);
  const int D = Y.size_from_dim(canonical_axis);
  dX->ResizeLike(Y);
  softmax_gradient_kernel<<<
      N, SOFTMAX_NUM_THREADS, 0, context_.cuda_stream()>>>(
      D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
  return true;
}

namespace {
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
    SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
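As a side note for readers without the Caffe2 headers, the row-softmax idea used above (subtract the row max, exponentiate, normalize) can be sketched in a few lines of plain CUDA. The kernel below is an invented, minimal illustration and not the operator implementation from this record.

// Hedged sketch: a self-contained row-wise softmax in plain CUDA, assuming
// nothing from Caffe2. The kernel name and launch shape are invented here
// for illustration only.
#include <cfloat>
#include <cmath>
#include <cuda_runtime.h>

__global__ void naive_row_softmax(const float* x, float* y, int rows, int cols) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per row
  if (row >= rows) return;
  const float* xr = x + row * cols;
  float* yr = y + row * cols;
  float m = -FLT_MAX;                                // row max, for stability
  for (int c = 0; c < cols; ++c) m = fmaxf(m, xr[c]);
  float sum = 0.f;
  for (int c = 0; c < cols; ++c) { yr[c] = expf(xr[c] - m); sum += yr[c]; }
  for (int c = 0; c < cols; ++c) yr[c] /= sum;       // normalize the row
}

// Example launch (device pointers d_x, d_y assumed already allocated):
// naive_row_softmax<<<(rows + 127) / 128, 128>>>(d_x, d_y, rows, cols);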
9a63dc98191d31a270e710222a192100e37dda79.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolyhedron<8>
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<8> >(const hpmc_free_volume_args_t &args,
                                                       const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolyhedron<8> >(const hpmc_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
9a63dc98191d31a270e710222a192100e37dda79.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolyhedron<8>
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<8> >(const hpmc_free_volume_args_t &args,
                                                       const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolyhedron<8> >(const hpmc_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<8> >(const hpmc_implicit_args_t& args,
                                                  const typename ShapeConvexPolyhedron<8> ::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
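The pair above is almost a pure rename (hipError_t versus cudaError_t, plus the hipify banner). A common way to centralize that rename when one source tree must build for both runtimes is a small alias header; the sketch below is a generic illustration with invented names, not something taken from HOOMD-blue.

// Hedged sketch (invented names, not from the record above): fold the
// CUDA/HIP API rename into one header so the rest of the tree is neutral.
#ifdef __HIP_PLATFORM_AMD__
  #include <hip/hip_runtime.h>
  typedef hipError_t gpuError_t;
  #define gpuSuccess        hipSuccess
  #define gpuGetErrorString hipGetErrorString
#else
  #include <cuda_runtime.h>
  typedef cudaError_t gpuError_t;
  #define gpuSuccess        cudaSuccess
  #define gpuGetErrorString cudaGetErrorString
#endif

#include <cstdio>

// Check a runtime call and report where it failed.
#define GPU_CHECK(call)                                          \
  do {                                                           \
    gpuError_t err_ = (call);                                    \
    if (err_ != gpuSuccess)                                      \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
              gpuGetErrorString(err_));                          \
  } while (0)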
7e7dc6dcca9ada85ef149fb3a082e5f4aaf5449c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 24
#define THREADS 8

__global__ void reduce(float *A, float *results) {
    __shared__ float sdata[THREADS];
    int i = blockDim.x*blockIdx.x+threadIdx.x;

    sdata[threadIdx.x] = A[i];

    for(unsigned s = blockDim.x/2;s > 0; s>>=1) {
        if(threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x+s])
            sdata[threadIdx.x] = sdata[threadIdx.x+s];
        __syncthreads();
    }

    if(threadIdx.x == 0)
        results[blockIdx.x] = sdata[0];
}

int main() {
    float A[N], *A_d, *results, *results_d, result;
    int i;
    dim3 dimBlock(THREADS);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);

    hipSetDevice(0);

    for (i=0; i<N; i++)
        A[i] = N-i;
    A[3] = 2*N;
    A[N-3] = -N;

    hipMalloc((void **) &A_d, sizeof(float)*N);
    hipMemcpy(A_d, A, sizeof(float)*N, hipMemcpyHostToDevice);
    hipMalloc((void **) &results_d, dimGrid.x*sizeof(float));

    hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), 0, 0, A_d, results_d);

    results = (float*)malloc(dimGrid.x*sizeof(float));
    hipMemcpy(results, results_d, dimGrid.x*sizeof(float), hipMemcpyDeviceToHost);

    result = results[0];
    for(i=1;i<dimGrid.x;i++)
        if(result < results[i])
            result = results[i];

    printf("%f\n", result);

    hipFree(A_d);
    hipFree(results_d);
}
7e7dc6dcca9ada85ef149fb3a082e5f4aaf5449c.cu
#include <stdio.h>

#define N 24
#define THREADS 8

__global__ void reduce(float *A, float *results) {
    __shared__ float sdata[THREADS];
    int i = blockDim.x*blockIdx.x+threadIdx.x;

    sdata[threadIdx.x] = A[i];

    for(unsigned s = blockDim.x/2;s > 0; s>>=1) {
        if(threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x+s])
            sdata[threadIdx.x] = sdata[threadIdx.x+s];
        __syncthreads();
    }

    if(threadIdx.x == 0)
        results[blockIdx.x] = sdata[0];
}

int main() {
    float A[N], *A_d, *results, *results_d, result;
    int i;
    dim3 dimBlock(THREADS);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);

    cudaSetDevice(0);

    for (i=0; i<N; i++)
        A[i] = N-i;
    A[3] = 2*N;
    A[N-3] = -N;

    cudaMalloc((void **) &A_d, sizeof(float)*N);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &results_d, dimGrid.x*sizeof(float));

    reduce<<<dimGrid, dimBlock>>>(A_d, results_d);

    results = (float*)malloc(dimGrid.x*sizeof(float));
    cudaMemcpy(results, results_d, dimGrid.x*sizeof(float), cudaMemcpyDeviceToHost);

    result = results[0];
    for(i=1;i<dimGrid.x;i++)
        if(result < results[i])
            result = results[i];

    printf("%f\n", result);

    cudaFree(A_d);
    cudaFree(results_d);
}
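The record above computes a per-block maximum in shared memory and merges the per-block results on the host. As an alternative sketch, the same block maximum can be written with warp shuffles; this is an invented example, not part of the dataset, and it assumes blockDim.x is a multiple of warpSize and compute capability 3.0 or newer.

// Hedged sketch (invented names, not from the record above): block-level
// maximum using warp shuffles instead of a full shared-memory tree.
#include <cfloat>
#include <cuda_runtime.h>

__inline__ __device__ float warp_max(float v) {
    // After the loop, lane 0 of the warp holds the warp-wide maximum.
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        v = fmaxf(v, __shfl_down_sync(0xffffffffu, v, offset));
    return v;
}

__global__ void block_max(const float *in, float *out, int n) {
    __shared__ float warp_results[32];            // one slot per warp
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float v = (i < n) ? in[i] : -FLT_MAX;         // pad out-of-range lanes
    v = warp_max(v);
    int lane = threadIdx.x % warpSize;
    int wid  = threadIdx.x / warpSize;
    if (lane == 0) warp_results[wid] = v;
    __syncthreads();
    int nwarps = (blockDim.x + warpSize - 1) / warpSize;
    if (wid == 0) {                               // first warp reduces the rest
        v = (lane < nwarps) ? warp_results[lane] : -FLT_MAX;
        v = warp_max(v);
        if (lane == 0) out[blockIdx.x] = v;       // one maximum per block
    }
}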
1f01eaead8e33c5cd1f0753b167bd68085ba1f0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = 0; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += (x[index] + y[idx][idy]); offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, count, size); hipDeviceSynchronize(); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); hipDeviceSynchronize(); } __global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fsubtract_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); hipDeviceSynchronize(); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fdivide_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const 
dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsquare_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); hipDeviceSynchronize(); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); hipDeviceSynchronize(); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); hipDeviceSynchronize(); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); hipDeviceSynchronize(); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); hipDeviceSynchronize(); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int 
dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); hipDeviceSynchronize(); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); hipDeviceSynchronize(); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); hipDeviceSynchronize(); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsqrt_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); hipDeviceSynchronize(); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( concat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); hipDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( unconcat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); hipDeviceSynchronize(); } __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { hipLaunchKernelGGL(( Ftranspose_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, dim1, size); hipDeviceSynchronize(); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { hipLaunchKernelGGL(( set_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, col, size, val); hipDeviceSynchronize(); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, 
int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( FLookup_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { hipLaunchKernelGGL(( DLookup_kernel), dim3((l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gx, loss, gxdim0, gxdim1, l_size, cols, col_num); } __global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( get_cols_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { hipLaunchKernelGGL(( get_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, col, size); hipDeviceSynchronize(); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { hipLaunchKernelGGL(( Fadd_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, col, dim0, size); hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) 
{ \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Favgpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } hipDeviceSynchronize(); } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { hipLaunchKernelGGL(( Davgpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, n, gx); hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fsumpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } hipDeviceSynchronize(); } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { hipLaunchKernelGGL(( Dsumpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, gx); hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > 
thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fmaxpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } hipDeviceSynchronize(); } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dmaxpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fminpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dminpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); hipDeviceSynchronize(); }
1f01eaead8e33c5cd1f0753b167bd68085ba1f0e.cu
#include "kernel.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = 0; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += (x[index] + y[idx][idy]); offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, count, size); cudaDeviceSynchronize(); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fsubtract_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fdivide_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); 
cudaDeviceSynchronize(); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fadd_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { Fsquare_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); cudaDeviceSynchronize(); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); cudaDeviceSynchronize(); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); cudaDeviceSynchronize(); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const 
dtype* x, dtype* r, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); cudaDeviceSynchronize(); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); cudaDeviceSynchronize(); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { Fsqrt_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); cudaDeviceSynchronize(); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { concat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); cudaDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { unconcat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); cudaDeviceSynchronize(); } __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size); cudaDeviceSynchronize(); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { set_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, col, size, val); cudaDeviceSynchronize(); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { FLookup_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int 
gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { DLookup_kernel<<<(l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (gx, loss, gxdim0, gxdim1, l_size, cols, col_num); } __global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { get_cols_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { get_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, col, size); cudaDeviceSynchronize(); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { Fadd_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, col, dim0, size); cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Favgpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } cudaDeviceSynchronize(); } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { Davgpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / 
THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, n, gx); cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fsumpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } cudaDeviceSynchronize(); } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { Dsumpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, gx); cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fmaxpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } cudaDeviceSynchronize(); } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void 
Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dmaxpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fminpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dminpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); cudaDeviceSynchronize(); }
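Nearly every launch in the record above follows the same pattern: a ceil-divided one-dimensional grid, an in-kernel bounds check, and a blocking cudaDeviceSynchronize afterwards. The snippet below spells that pattern out once in isolation; the kernel and helper names are invented for illustration and are not part of the file above.

// Hedged sketch (invented example): the "ceil-divide grid, bounds-checked
// kernel, blocking sync" launch pattern used throughout the record above.
#include <cuda_runtime.h>

constexpr int kThreadsPerBlock = 1024;            // mirrors THREADS_PER_BLOCK

inline int ceil_div(int n, int d) { return (n + d - 1) / d; }

__global__ void scale_kernel(const float* x, float a, float* y, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i];                   // guard: grid may overshoot n
}

void scale(const float* d_x, float a, float* d_y, int n) {
    scale_kernel<<<ceil_div(n, kThreadsPerBlock), kThreadsPerBlock>>>(d_x, a, d_y, n);
    cudaDeviceSynchronize();                      // matches the blocking style above
}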
3d7de378acd3c981fb2059b5e91ddd6d469abd6e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#include "cuda_utils.h"
#include "timer.c"

typedef float dtype;
const int size_const = 32;

__global__ void matTrans(dtype* AT, dtype* A, int N)  {
    /* Fill your code here */
    int tb_size = blockDim.x;
    int col = blockIdx.x * tb_size + threadIdx.x;
    int row = blockIdx.y * tb_size + threadIdx.y;
    //int width = gridDim.x*tb_size;
    int chunk = blockDim.y;

    // +1 i s to avoid shared memory bank conflicts!!
    __shared__ dtype ldata[size_const+1][size_const+1];

    //copy data from global to shared memory and transpose
    for (int i = 0; i < tb_size; i += chunk){
        // GPU simple ://AT[col*N + row + i] = A[(row+i) * N + col];
        //if(col < N && row + i < N)
        ldata[threadIdx.y + i][threadIdx.x] = A[((row+i)%N)*N + col%N];
        //else ldata[threadIdx.y + i][threadIdx.x] = 0;
    }
    __syncthreads();

    //since it's been transposed while copying, block ids are inverted
    col = blockIdx.y * tb_size + threadIdx.x;
    row = blockIdx.x * tb_size + threadIdx.y;

    //coalesced writes:
    for (int j = 0; j < tb_size; j += chunk)
        if(row+j < N && col < N)
            AT[(row+j)*N + col] = ldata[threadIdx.x][threadIdx.y +j];
}

void parseArg (int argc, char** argv, int* N)
{
    if(argc == 2) {
        *N = atoi (argv[1]);
        assert (*N > 0);
    } else {
        fprintf (stderr, "usage: %s <N>\n", argv[0]);
        exit (EXIT_FAILURE);
    }
}

void initArr (dtype* in, int N)
{
    int i;
    for(i = 0; i < N; i++) {
        in[i] = (dtype) rand () / RAND_MAX;
    }
}

void cpuTranspose (dtype* A, dtype* AT, int N)
{
    int i, j;
    for(i = 0; i < N; i++) {
        for(j = 0; j < N; j++) {
            AT[j * N + i] = A[i * N + j];
        }
    }
}

int cmpArr (dtype* a, dtype* b, int N)
{
    int cnt, i;
    cnt = 0;
    for(i = 0; i < N; i++) {
        if(abs(a[i] - b[i]) > 1e-6) cnt++;
    }
    return cnt;
}

void gpuTranspose (dtype* A, dtype* AT, int N)
{
    struct stopwatch_t* timer = NULL;
    long double t_gpu,t_malloc,t_pcie,t_gpu2;
    dtype * d_A, *d_AT;

    /* Setup timers */
    stopwatch_init ();
    timer = stopwatch_create ();

    const int tb_x = 32; //size of each thread block
    const int tb_y = 8;  //each thread blocks width

    dim3 numTB ;
    numTB.x = (int)ceil((double)N/(double)tb_x) ;
    numTB.y = (int)ceil((double)N/(double)tb_x) ;
    dim3 tbSize; //= (tile,rows);
    tbSize.x = tb_x;
    tbSize.y = tb_y;

    stopwatch_start (timer);
    /* run your kernel here */
    CUDA_CHECK_ERROR (hipMalloc ((void**) &d_A, N * N * sizeof (dtype)));
    CUDA_CHECK_ERROR (hipMalloc ((void**) &d_AT, N * N * sizeof (dtype)));
    t_malloc = stopwatch_stop (timer);
    fprintf (stderr, "hipMalloc: %Lg seconds\n", t_malloc);

    stopwatch_start (timer);
    // copy arrays to device via PCIe
    CUDA_CHECK_ERROR (hipMemcpy (d_A, A, N * N * sizeof (dtype), hipMemcpyHostToDevice));
    t_pcie = stopwatch_stop (timer);
    fprintf (stderr, "hipMemcpy (and padding): %Lg seconds\n", t_pcie);

    stopwatch_start (timer);
    // kernel invocation
    hipLaunchKernelGGL(( matTrans), dim3(numTB),dim3(tbSize), 0, 0, d_AT, d_A, N);
    hipDeviceSynchronize ();
    t_gpu = stopwatch_stop (timer);

    CUDA_CHECK_ERROR (hipMemcpy (AT, d_AT, N * N * sizeof (dtype), hipMemcpyDeviceToHost));
    // hipDeviceSynchronize ();
    fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n",
             t_gpu, (N * N) / t_gpu * 1e-9 );
}

int main(int argc, char** argv)
{
    /* variables */
    dtype *A, *ATgpu, *ATcpu;
    int err;

    int N;

    struct stopwatch_t* timer = NULL;
    long double t_cpu;

    N = -1;
    parseArg (argc, argv, &N);

    /* input and output matrices on host */
    /* output */
    ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
    ATgpu = (dtype*) malloc (N * N * sizeof (dtype));
    /* input */
    A = (dtype*) malloc (N * N * sizeof (dtype));

    initArr (A, N * N);

    /* GPU transpose kernel */
    gpuTranspose (A, ATgpu, N);

    /* Setup timers */
    stopwatch_init ();
    timer = stopwatch_create ();

    stopwatch_start (timer);
    /* compute reference array */
    cpuTranspose (A, ATcpu, N);
    t_cpu = stopwatch_stop (timer);
    fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu);

    /* check correctness */
    err = cmpArr (ATgpu, ATcpu, N * N);
    if(err) {
        fprintf (stderr, "Transpose failed: %d\n", err);
    } else {
        fprintf (stderr, "Transpose successful\n");
    }

    free (A);
    free (ATgpu);
    free (ATcpu);

    return 0;
}
3d7de378acd3c981fb2059b5e91ddd6d469abd6e.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include "cuda_utils.h" #include "timer.c" typedef float dtype; const int size_const = 32; __global__ void matTrans(dtype* AT, dtype* A, int N) { /* Fill your code here */ int tb_size = blockDim.x; int col = blockIdx.x * tb_size + threadIdx.x; int row = blockIdx.y * tb_size + threadIdx.y; //int width = gridDim.x*tb_size; int chunk = blockDim.y; // +1 i s to avoid shared memory bank conflicts!! __shared__ dtype ldata[size_const+1][size_const+1]; //copy data from global to shared memory and transpose for (int i = 0; i < tb_size; i += chunk){ // GPU simple ://AT[col*N + row + i] = A[(row+i) * N + col]; //if(col < N && row + i < N) ldata[threadIdx.y + i][threadIdx.x] = A[((row+i)%N)*N + col%N]; //else ldata[threadIdx.y + i][threadIdx.x] = 0; } __syncthreads(); //since it's been transposed while copying, block ids are inverted col = blockIdx.y * tb_size + threadIdx.x; row = blockIdx.x * tb_size + threadIdx.y; //coalesced writes: for (int j = 0; j < tb_size; j += chunk) if(row+j < N && col < N) AT[(row+j)*N + col] = ldata[threadIdx.x][threadIdx.y +j]; } void parseArg (int argc, char** argv, int* N) { if(argc == 2) { *N = atoi (argv[1]); assert (*N > 0); } else { fprintf (stderr, "usage: %s <N>\n", argv[0]); exit (EXIT_FAILURE); } } void initArr (dtype* in, int N) { int i; for(i = 0; i < N; i++) { in[i] = (dtype) rand () / RAND_MAX; } } void cpuTranspose (dtype* A, dtype* AT, int N) { int i, j; for(i = 0; i < N; i++) { for(j = 0; j < N; j++) { AT[j * N + i] = A[i * N + j]; } } } int cmpArr (dtype* a, dtype* b, int N) { int cnt, i; cnt = 0; for(i = 0; i < N; i++) { if(abs(a[i] - b[i]) > 1e-6) cnt++; } return cnt; } void gpuTranspose (dtype* A, dtype* AT, int N) { struct stopwatch_t* timer = NULL; long double t_gpu,t_malloc,t_pcie,t_gpu2; dtype * d_A, *d_AT; /* Setup timers */ stopwatch_init (); timer = stopwatch_create (); const int tb_x = 32; //size of each thread block const int tb_y = 8; //each thread blocks width dim3 numTB ; numTB.x = (int)ceil((double)N/(double)tb_x) ; numTB.y = (int)ceil((double)N/(double)tb_x) ; dim3 tbSize; //= (tile,rows); tbSize.x = tb_x; tbSize.y = tb_y; stopwatch_start (timer); /* run your kernel here */ CUDA_CHECK_ERROR (cudaMalloc ((void**) &d_A, N * N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc ((void**) &d_AT, N * N * sizeof (dtype))); t_malloc = stopwatch_stop (timer); fprintf (stderr, "cudaMalloc: %Lg seconds\n", t_malloc); stopwatch_start (timer); // copy arrays to device via PCIe CUDA_CHECK_ERROR (cudaMemcpy (d_A, A, N * N * sizeof (dtype), cudaMemcpyHostToDevice)); t_pcie = stopwatch_stop (timer); fprintf (stderr, "cudaMemcpy (and padding): %Lg seconds\n", t_pcie); stopwatch_start (timer); // kernel invocation matTrans<<<numTB,tbSize>>>(d_AT, d_A, N); cudaThreadSynchronize (); t_gpu = stopwatch_stop (timer); CUDA_CHECK_ERROR (cudaMemcpy (AT, d_AT, N * N * sizeof (dtype), cudaMemcpyDeviceToHost)); // cudaThreadSynchronize (); fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 ); } int main(int argc, char** argv) { /* variables */ dtype *A, *ATgpu, *ATcpu; int err; int N; struct stopwatch_t* timer = NULL; long double t_cpu; N = -1; parseArg (argc, argv, &N); /* input and output matrices on host */ /* output */ ATcpu = (dtype*) malloc (N * N * sizeof (dtype)); ATgpu = (dtype*) malloc (N * N * sizeof (dtype)); /* input */ A = (dtype*) malloc (N * N * sizeof (dtype)); initArr (A, N * N); /* GPU transpose kernel */ gpuTranspose (A, ATgpu, N); /* 
Setup timers */ stopwatch_init (); timer = stopwatch_create (); stopwatch_start (timer); /* compute reference array */ cpuTranspose (A, ATcpu, N); t_cpu = stopwatch_stop (timer); fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu); /* check correctness */ err = cmpArr (ATgpu, ATcpu, N * N); if(err) { fprintf (stderr, "Transpose failed: %d\n", err); } else { fprintf (stderr, "Transpose successful\n"); } free (A); free (ATgpu); free (ATcpu); return 0; }
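The transpose pair above leans on two ideas that are easy to miss in the flattened listing: a padded shared-memory tile (the "+1" column keeps column-wise reads from landing in a single shared-memory bank) and swapped block indices so both the global read and the global write stay coalesced. Below is a minimal, self-contained sketch of the same pattern, assuming a full 32x32 thread block rather than the 32x8 block with a 4-row loop used in the file; transpose_tiled and TILE are illustrative names, not part of the original code.

#define TILE 32

__global__ void transpose_tiled(float* out, const float* in, int n)
{
    // one extra column of padding so the tile[tx][ty] reads hit different banks
    __shared__ float tile[TILE][TILE + 1];

    int x = blockIdx.x * TILE + threadIdx.x;   // column in the input
    int y = blockIdx.y * TILE + threadIdx.y;   // row in the input
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = in[y * n + x];
    __syncthreads();

    // swap the block indices so the write side is also coalesced
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < n && y < n)
        out[y * n + x] = tile[threadIdx.x][threadIdx.y];
}

// launched, for example, with:
//   dim3 block(TILE, TILE);
//   dim3 grid((n + TILE - 1) / TILE, (n + TILE - 1) / TILE);
//   transpose_tiled<<<grid, block>>>(d_AT, d_A, n);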
7ac04a31edaa30d37d6d9f218531c828dde26e19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_ListOffsetArray_compact_offsets(T* tooffsets, const C* fromoffsets, int64_t length, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { int64_t diff = (int64_t)fromoffsets[0]; if (thread_id == 0) { tooffsets[0] = 0; } tooffsets[thread_id + 1] = fromoffsets[thread_id + 1] - diff; } } }
7ac04a31edaa30d37d6d9f218531c828dde26e19.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE template <typename T, typename C> __global__ void awkward_ListOffsetArray_compact_offsets(T* tooffsets, const C* fromoffsets, int64_t length, uint64_t invocation_index, uint64_t* err_code) { if (err_code[0] == NO_ERROR) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id < length) { int64_t diff = (int64_t)fromoffsets[0]; if (thread_id == 0) { tooffsets[0] = 0; } tooffsets[thread_id + 1] = fromoffsets[thread_id + 1] - diff; } } }
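The kernel pair above only rebases a list-offsets array so that it starts at zero: every thread subtracts fromoffsets[0] from one entry, and thread 0 also writes the leading zero. A host-side reference of the same arithmetic is sketched below; the array values are made up purely for illustration.

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t fromoffsets[] = {3, 5, 9, 10};   // hypothetical offsets of 3 sub-lists
    const int64_t length = 3;
    int64_t tooffsets[4];

    tooffsets[0] = 0;
    for (int64_t i = 0; i < length; i++)
        tooffsets[i + 1] = fromoffsets[i + 1] - fromoffsets[0];   // same subtraction as the kernel

    for (int64_t i = 0; i <= length; i++)
        printf("%lld ", (long long)tooffsets[i]);                 // prints: 0 2 6 7
    printf("\n");
    return 0;
}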
603ad0fbca4410b0812d491886e2022f4f7cabb0.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cmath> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <iomanip> #include <iostream> #include <omp.h> #include <vector> #define C 4 #define THREADS 1024 // 2^10 #define MAX 110 #define MAX_S MAX* MAX #define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24 #define pb push_back #define mp make_pair #define gpuErrChk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } inline void gpuAssert(hipError_t code, char* file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) getchar(); } } using namespace std; typedef long long int64; typedef pair<int, int> ii; /* sz ---> Adjacency matrix dimension (1D) perm ---> Number of permutations of an instance graph ---> Adjacency matrix itself seeds ---> Set of seeds faces ---> Set of triangular faces for the output */ struct Node { int sz, perm; int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX]; }; /* faces ---> Number of triangular faces count ---> Number of remaining vertices tmpMax ---> Max value obtained for a seed F ---> Set of triangular faces V ---> Set of remaining vertices */ struct Params { int *faces, *count, *tmpMax; int *F, *V; }; /* SIZE ---> Number of vertices BLOCKS ---> Number of blocks PERM ---> Number of permutations R ---> Output graph for a possible solution F ---> Set of triangular faces of an instance qtd ---> Number of possible 4-cliques */ int SIZE, PERM, GPU_CNT = 1; int R[MAX_S], F[6 * MAX], bib[MAX]; int qtd = 0; Node* N; /* Generates a list containing the vertices which are not on the planar graph. */ __device__ void generateList(Node* devN, Params* devP, int t, int offset) { int sz = devN->sz, perm = devN->perm; int va = devN->seeds[(t + offset) * 4], vb = devN->seeds[(t + offset) * 4 + 1], vc = devN->seeds[(t + offset) * 4 + 2], vd = devN->seeds[(t + offset) * 4 + 3]; for (int i = 0; i < sz; i++) { if (i == va || i == vb || i == vc || i == vd) devP->V[t + i * perm] = -1; else devP->V[t + i * perm] = i; } } /* Returns the weight of the planar graph so far. */ __device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t, int offset) { int sz = devN->sz, perm = devN->perm; int va = devN->seeds[(t + offset) * 4], vb = devN->seeds[(t + offset) * 4 + 1], vc = devN->seeds[(t + offset) * 4 + 2], vd = devN->seeds[(t + offset) * 4 + 3]; /* Generate first triangle of the output graph */ devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; /* Generate the next 3 possible faces */ devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; devP->F[t + (devP->faces[t] * 3) * perm] = vb; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc]; resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd]; devP->tmpMax[t] = resp; } /* Insert a new vertex, 3 new triangular faces and removes face 'f' from the set. 
*/ __device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t) { int sz = devN->sz, perm = devN->perm; /* Remove the chosen face and insert a new one */ int va = devP->F[t + (f * 3) * perm], vb = devP->F[t + (f * 3 + 1) * perm], vc = devP->F[t + (f * 3 + 2) * perm]; devP->F[t + (f * 3) * perm] = new_vertex, devP->F[t + (f * 3 + 1) * perm] = va, devP->F[t + (f * 3 + 2) * perm] = vb; /* and insert the other two possible faces. */ devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex]; return resp; } /* Return the vertex with the maximum gain inserting within a face 'f'. */ __device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t) { int sz = devN->sz, perm = devN->perm; int gain = -1, vertex = -1; /* iterate through the remaining vertices */ for (int new_vertex = 0; new_vertex < sz; new_vertex++) { if (devP->V[t + new_vertex * perm] == -1) continue; /* and test which has the maximum gain with its insetion within all possible faces */ int faces = devP->faces[t]; for (int i = 0; i < faces; i++) { int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm]; int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex]; if (tmpGain > gain) { gain = tmpGain; *f = i; vertex = new_vertex; } } } return vertex; } __device__ void tmfg(Node* devN, Params* devP, int graph[], int t) { int perm = devN->perm; while (devP->count[t]) { int f = -1; int vertex = maxGain(devN, devP, graph, &f, t); devP->V[t + vertex * perm] = -1; devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t); devP->count[t]--; } } __device__ void copyGraph(Node* devN, Params* devP, int t) { int faces = devP->faces[t], perm = devN->perm; for (int i = 0; i < faces; i++) { int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm]; devN->F_ANS[i * 3] = va, devN->F_ANS[i * 3 + 1] = vb, devN->F_ANS[i * 3 + 2] = vc; } } __device__ void initializeDevice(Params* devP, int sz, int t) { devP->faces[t] = 0; devP->tmpMax[t] = -1; devP->count[t] = sz - 4; } __global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int offset, int mx) { int x = blockDim.x * blockIdx.x + threadIdx.x; devN->perm = mx; int sz = devN->sz, perm = devN->perm; /* Uncoment the following line to put the graph on the shared memory */ // extern __shared__ int graph[]; int* graph; /* Uncoment the following line to put the graph on the shared memory */ // for (int i = threadIdx.x; i < sz*sz; i += blockDim.x) // graph[i] = devN->graph[i]; // __syncthreads(); graph = devN->graph; if (x < mx && x < perm) { initializeDevice(&devP, sz, x); generateList(devN, &devP, x, offset); generateTriangularFaceList(devN, &devP, graph, x, offset); tmfg(devN, &devP, graph, x); atomicMax(respMax, devP.tmpMax[x]); __syncthreads(); if (devP.tmpMax[x] == *respMax) { copyGraph(devN, &devP, x); } __syncthreads(); } } int tmfgPrepare() { int finalResp = -1, pos = -1; #pragma omp parallel for num_threads(GPU_CNT) for (int gpu_id = 0; gpu_id < GPU_CNT; gpu_id++) { hipSetDevice(gpu_id); int range = (int)ceil(PERM / (double)GPU_CNT); 
int perm = ((gpu_id + 1) * range > PERM ? PERM - gpu_id * range : range); int offset = gpu_id * range; N->perm = perm; int resp = -1, *tmpResp; gpuErrChk(hipMalloc((void**)&tmpResp, sizeof(int))); gpuErrChk(hipMemcpy(tmpResp, &resp, sizeof(int), hipMemcpyHostToDevice)); Node* devN; Params devP; gpuErrChk(hipMalloc((void**)&devN, sizeof(Node))); gpuErrChk(hipMemcpy(devN, N, sizeof(Node), hipMemcpyHostToDevice)); size_t sz_node = sizeof(int) * MAX_S + sizeof(int) * C * PERM_MAX + sizeof(int) * 6 * MAX; size_t sz_prm = range * sizeof(int) * 3 + range * sizeof(int) * (7 * SIZE); printf("Using %d mbytes in Kernel %d\n", (sz_node + sz_prm) / (1 << 20), gpu_id); fprintf(stderr, "Using %d mbytes in Kernel %d\n", (sz_node + sz_prm) / (1 << 20), gpu_id); size_t cuInfo = 0, cuTotal = 0; gpuErrChk(hipMemGetInfo(&cuInfo, &cuTotal)); cuInfo *= 0.95; printf("Free memory: %dMB\nTotal memory: %dMB\n", cuInfo / (1 << 20), cuTotal / (1 << 20)); int it_range, it_perm, it_offset; int BATCH_CNT = (int)ceil(sz_prm / (double)cuInfo); printf("Iterations: %d\n", BATCH_CNT); it_range = (int)ceil(perm / (double)BATCH_CNT); gpuErrChk(hipMalloc((void**)&devP.faces, it_range * sizeof(int))); gpuErrChk(hipMalloc((void**)&devP.count, it_range * sizeof(int))); gpuErrChk(hipMalloc((void**)&devP.tmpMax, it_range * sizeof(int))); gpuErrChk(hipMalloc((void**)&devP.F, 6 * SIZE * it_range * sizeof(int))); gpuErrChk(hipMalloc((void**)&devP.V, SIZE * it_range * sizeof(int))); for (int btch_id = 0; btch_id < BATCH_CNT; btch_id++) { it_perm = ((btch_id + 1) * it_range > perm ? perm - btch_id * it_range : it_range); it_offset = btch_id * it_range + offset; dim3 blocks(it_perm / THREADS + 1, 1); dim3 threads(THREADS, 1); printf("Kernel %d launched with %d blocks, each w/ %d threads\n", btch_id + 1, it_range / THREADS + 1, THREADS); fprintf(stderr, "Kernel %d launched with %d blocks, each w/ %d threads\n", btch_id, it_range / THREADS + 1, THREADS); /* Uncoment the following line to put the graph on the shared memory */ // tmfgParallel <<<blocks, threads, SIZE*SIZE*sizeof(int)>>>(devN, devP, // tmpResp, it_offset, it_perm); hipLaunchKernelGGL(( tmfgParallel), dim3(blocks), dim3(threads), 0, 0, devN, devP, tmpResp, it_offset, it_perm); gpuErrChk(hipDeviceSynchronize()); /* Copy back the maximum weight and the set of faces which gave this result. */ gpuErrChk(hipMemcpy(&resp, tmpResp, sizeof(int), hipMemcpyDeviceToHost)); printf("Kernel finished.\nLocal maximum found in Kernel %d: %d\n", btch_id, resp); printf("Copying results...\n"); #pragma omp critical { if (resp > finalResp) { finalResp = resp; pos = gpu_id; } } if (pos == gpu_id) { gpuErrChk(hipMemcpy(&F, devN->F_ANS, 6 * MAX * sizeof(int), hipMemcpyDeviceToHost)); } } printf("Freeing memory...\n"); gpuErrChk(hipFree(devN)); gpuErrChk(hipFree(devP.faces)); gpuErrChk(hipFree(devP.count)); gpuErrChk(hipFree(devP.tmpMax)); gpuErrChk(hipFree(devP.F)); gpuErrChk(hipFree(devP.V)); gpuErrChk(hipDeviceReset()); } return finalResp; } /* Print elapsed time. 
*/ void printElapsedTime(double start, double stop) { double elapsed = stop - start; printf("Elapsed time: %.3lfs.\n", elapsed); } double getTime() { timespec ts; clock_gettime(CLOCK_REALTIME, &ts); return double(ts.tv_sec) + double(ts.tv_nsec) / 1e9; } /* C ---> Size of the combination index ---> Current index in data[] data[] ---> Temporary array to store a current combination i ---> Index of current element in vertices[] */ void combineUntil(int index, vector<int>& data, int i) { // Current cobination is ready, print it if (index == C) { for (int j = 0; j < C; j++) { N->seeds[qtd * C + j] = data[j]; } qtd++; return; } // When there are no more elements to put in data[] if (i >= SIZE) return; //current is inserted; put next at a next location data[index] = i; combineUntil(index + 1, data, i + 1); //current is deleted; replace it with next combineUntil(index, data, i + 1); } void combine() { vector<int> data(C); /* print all combinations of size 'r' using a temporary array 'data' */ combineUntil(0, data, 0); } void initialize() { for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { R[i * SIZE + j] = R[j * SIZE + i] = -1; } } } void readInput() { int x; cin >> SIZE; PERM = bib[SIZE - 1]; N = (Node*)malloc(sizeof(Node)); N->sz = SIZE; for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { cin >> x; N->graph[i * SIZE + j] = x; N->graph[j * SIZE + i] = x; } } } /* Define the number of permutations and blocks */ void sizeDefinitions() { for (int i = 4; i <= MAX; i++) { int resp = 1; for (int j = i - 3; j <= i; j++) resp *= j; resp /= 24; bib[i - 1] = resp; } } int main(int argv, char** argc) { ios::sync_with_stdio(false); sizeDefinitions(); /* Read the input, which is given by a size of a graph and its weighted edges. The given graph is dense. */ readInput(); initialize(); /* Given the number of vertices, generate multiple 4-clique seeds. */ combine(); if (argv == 2) { hipSetDevice(atoi(argc[1])); } else if (argv == 3) { GPU_CNT = atoi(argc[2]); int d; hipGetDeviceCount(&d); if (GPU_CNT > d) GPU_CNT = d; } double start = getTime(); int respMax = tmfgPrepare(); double stop = getTime(); /* Reconstruct the graph given the faces of the graph */ for (int i = 0; i < 2 * SIZE; i++) { int va = F[i * 3], vb = F[i * 3 + 1], vc = F[i * 3 + 2]; if (va == vb && vb == vc) continue; R[va * SIZE + vb] = R[vb * SIZE + va] = N->graph[va * SIZE + vb]; R[va * SIZE + vc] = R[vc * SIZE + va] = N->graph[va * SIZE + vc]; R[vb * SIZE + vc] = R[vc * SIZE + vb] = N->graph[vb * SIZE + vc]; } cout << "Printing generated graph: " << endl; for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { cout << R[i * SIZE + j] << " "; } cout << endl; } cout << endl; printElapsedTime(start, stop); cout << "Maximum weight found: " << respMax << endl; free(N); return 0; }
603ad0fbca4410b0812d491886e2022f4f7cabb0.cu
#include <algorithm> #include <cmath> #include <cstdio> #include <cstdlib> #include <cuda_runtime.h> #include <iomanip> #include <iostream> #include <omp.h> #include <vector> #define C 4 #define THREADS 1024 // 2^10 #define MAX 110 #define MAX_S MAX* MAX #define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24 #define pb push_back #define mp make_pair #define gpuErrChk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } inline void gpuAssert(cudaError_t code, char* file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) getchar(); } } using namespace std; typedef long long int64; typedef pair<int, int> ii; /* sz ---> Adjacency matrix dimension (1D) perm ---> Number of permutations of an instance graph ---> Adjacency matrix itself seeds ---> Set of seeds faces ---> Set of triangular faces for the output */ struct Node { int sz, perm; int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX]; }; /* faces ---> Number of triangular faces count ---> Number of remaining vertices tmpMax ---> Max value obtained for a seed F ---> Set of triangular faces V ---> Set of remaining vertices */ struct Params { int *faces, *count, *tmpMax; int *F, *V; }; /* SIZE ---> Number of vertices BLOCKS ---> Number of blocks PERM ---> Number of permutations R ---> Output graph for a possible solution F ---> Set of triangular faces of an instance qtd ---> Number of possible 4-cliques */ int SIZE, PERM, GPU_CNT = 1; int R[MAX_S], F[6 * MAX], bib[MAX]; int qtd = 0; Node* N; /* Generates a list containing the vertices which are not on the planar graph. */ __device__ void generateList(Node* devN, Params* devP, int t, int offset) { int sz = devN->sz, perm = devN->perm; int va = devN->seeds[(t + offset) * 4], vb = devN->seeds[(t + offset) * 4 + 1], vc = devN->seeds[(t + offset) * 4 + 2], vd = devN->seeds[(t + offset) * 4 + 3]; for (int i = 0; i < sz; i++) { if (i == va || i == vb || i == vc || i == vd) devP->V[t + i * perm] = -1; else devP->V[t + i * perm] = i; } } /* Returns the weight of the planar graph so far. */ __device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t, int offset) { int sz = devN->sz, perm = devN->perm; int va = devN->seeds[(t + offset) * 4], vb = devN->seeds[(t + offset) * 4 + 1], vc = devN->seeds[(t + offset) * 4 + 2], vd = devN->seeds[(t + offset) * 4 + 3]; /* Generate first triangle of the output graph */ devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; /* Generate the next 3 possible faces */ devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; devP->F[t + (devP->faces[t] * 3) * perm] = va; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; devP->F[t + (devP->faces[t] * 3) * perm] = vb; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vc; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vd; int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc]; resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd]; devP->tmpMax[t] = resp; } /* Insert a new vertex, 3 new triangular faces and removes face 'f' from the set. 
*/ __device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t) { int sz = devN->sz, perm = devN->perm; /* Remove the chosen face and insert a new one */ int va = devP->F[t + (f * 3) * perm], vb = devP->F[t + (f * 3 + 1) * perm], vc = devP->F[t + (f * 3 + 2) * perm]; devP->F[t + (f * 3) * perm] = new_vertex, devP->F[t + (f * 3 + 1) * perm] = va, devP->F[t + (f * 3 + 2) * perm] = vb; /* and insert the other two possible faces. */ devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex; devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb; devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc; int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex]; return resp; } /* Return the vertex with the maximum gain inserting within a face 'f'. */ __device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t) { int sz = devN->sz, perm = devN->perm; int gain = -1, vertex = -1; /* iterate through the remaining vertices */ for (int new_vertex = 0; new_vertex < sz; new_vertex++) { if (devP->V[t + new_vertex * perm] == -1) continue; /* and test which has the maximum gain with its insetion within all possible faces */ int faces = devP->faces[t]; for (int i = 0; i < faces; i++) { int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm]; int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex]; if (tmpGain > gain) { gain = tmpGain; *f = i; vertex = new_vertex; } } } return vertex; } __device__ void tmfg(Node* devN, Params* devP, int graph[], int t) { int perm = devN->perm; while (devP->count[t]) { int f = -1; int vertex = maxGain(devN, devP, graph, &f, t); devP->V[t + vertex * perm] = -1; devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t); devP->count[t]--; } } __device__ void copyGraph(Node* devN, Params* devP, int t) { int faces = devP->faces[t], perm = devN->perm; for (int i = 0; i < faces; i++) { int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm]; devN->F_ANS[i * 3] = va, devN->F_ANS[i * 3 + 1] = vb, devN->F_ANS[i * 3 + 2] = vc; } } __device__ void initializeDevice(Params* devP, int sz, int t) { devP->faces[t] = 0; devP->tmpMax[t] = -1; devP->count[t] = sz - 4; } __global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int offset, int mx) { int x = blockDim.x * blockIdx.x + threadIdx.x; devN->perm = mx; int sz = devN->sz, perm = devN->perm; /* Uncoment the following line to put the graph on the shared memory */ // extern __shared__ int graph[]; int* graph; /* Uncoment the following line to put the graph on the shared memory */ // for (int i = threadIdx.x; i < sz*sz; i += blockDim.x) // graph[i] = devN->graph[i]; // __syncthreads(); graph = devN->graph; if (x < mx && x < perm) { initializeDevice(&devP, sz, x); generateList(devN, &devP, x, offset); generateTriangularFaceList(devN, &devP, graph, x, offset); tmfg(devN, &devP, graph, x); atomicMax(respMax, devP.tmpMax[x]); __syncthreads(); if (devP.tmpMax[x] == *respMax) { copyGraph(devN, &devP, x); } __syncthreads(); } } int tmfgPrepare() { int finalResp = -1, pos = -1; #pragma omp parallel for num_threads(GPU_CNT) for (int gpu_id = 0; gpu_id < GPU_CNT; gpu_id++) { cudaSetDevice(gpu_id); int range = (int)ceil(PERM / 
(double)GPU_CNT); int perm = ((gpu_id + 1) * range > PERM ? PERM - gpu_id * range : range); int offset = gpu_id * range; N->perm = perm; int resp = -1, *tmpResp; gpuErrChk(cudaMalloc((void**)&tmpResp, sizeof(int))); gpuErrChk(cudaMemcpy(tmpResp, &resp, sizeof(int), cudaMemcpyHostToDevice)); Node* devN; Params devP; gpuErrChk(cudaMalloc((void**)&devN, sizeof(Node))); gpuErrChk(cudaMemcpy(devN, N, sizeof(Node), cudaMemcpyHostToDevice)); size_t sz_node = sizeof(int) * MAX_S + sizeof(int) * C * PERM_MAX + sizeof(int) * 6 * MAX; size_t sz_prm = range * sizeof(int) * 3 + range * sizeof(int) * (7 * SIZE); printf("Using %d mbytes in Kernel %d\n", (sz_node + sz_prm) / (1 << 20), gpu_id); fprintf(stderr, "Using %d mbytes in Kernel %d\n", (sz_node + sz_prm) / (1 << 20), gpu_id); size_t cuInfo = 0, cuTotal = 0; gpuErrChk(cudaMemGetInfo(&cuInfo, &cuTotal)); cuInfo *= 0.95; printf("Free memory: %dMB\nTotal memory: %dMB\n", cuInfo / (1 << 20), cuTotal / (1 << 20)); int it_range, it_perm, it_offset; int BATCH_CNT = (int)ceil(sz_prm / (double)cuInfo); printf("Iterations: %d\n", BATCH_CNT); it_range = (int)ceil(perm / (double)BATCH_CNT); gpuErrChk(cudaMalloc((void**)&devP.faces, it_range * sizeof(int))); gpuErrChk(cudaMalloc((void**)&devP.count, it_range * sizeof(int))); gpuErrChk(cudaMalloc((void**)&devP.tmpMax, it_range * sizeof(int))); gpuErrChk(cudaMalloc((void**)&devP.F, 6 * SIZE * it_range * sizeof(int))); gpuErrChk(cudaMalloc((void**)&devP.V, SIZE * it_range * sizeof(int))); for (int btch_id = 0; btch_id < BATCH_CNT; btch_id++) { it_perm = ((btch_id + 1) * it_range > perm ? perm - btch_id * it_range : it_range); it_offset = btch_id * it_range + offset; dim3 blocks(it_perm / THREADS + 1, 1); dim3 threads(THREADS, 1); printf("Kernel %d launched with %d blocks, each w/ %d threads\n", btch_id + 1, it_range / THREADS + 1, THREADS); fprintf(stderr, "Kernel %d launched with %d blocks, each w/ %d threads\n", btch_id, it_range / THREADS + 1, THREADS); /* Uncoment the following line to put the graph on the shared memory */ // tmfgParallel <<<blocks, threads, SIZE*SIZE*sizeof(int)>>>(devN, devP, // tmpResp, it_offset, it_perm); tmfgParallel<<<blocks, threads>>>(devN, devP, tmpResp, it_offset, it_perm); gpuErrChk(cudaDeviceSynchronize()); /* Copy back the maximum weight and the set of faces which gave this result. */ gpuErrChk(cudaMemcpy(&resp, tmpResp, sizeof(int), cudaMemcpyDeviceToHost)); printf("Kernel finished.\nLocal maximum found in Kernel %d: %d\n", btch_id, resp); printf("Copying results...\n"); #pragma omp critical { if (resp > finalResp) { finalResp = resp; pos = gpu_id; } } if (pos == gpu_id) { gpuErrChk(cudaMemcpy(&F, devN->F_ANS, 6 * MAX * sizeof(int), cudaMemcpyDeviceToHost)); } } printf("Freeing memory...\n"); gpuErrChk(cudaFree(devN)); gpuErrChk(cudaFree(devP.faces)); gpuErrChk(cudaFree(devP.count)); gpuErrChk(cudaFree(devP.tmpMax)); gpuErrChk(cudaFree(devP.F)); gpuErrChk(cudaFree(devP.V)); gpuErrChk(cudaDeviceReset()); } return finalResp; } /* Print elapsed time. 
*/ void printElapsedTime(double start, double stop) { double elapsed = stop - start; printf("Elapsed time: %.3lfs.\n", elapsed); } double getTime() { timespec ts; clock_gettime(CLOCK_REALTIME, &ts); return double(ts.tv_sec) + double(ts.tv_nsec) / 1e9; } /* C ---> Size of the combination index ---> Current index in data[] data[] ---> Temporary array to store a current combination i ---> Index of current element in vertices[] */ void combineUntil(int index, vector<int>& data, int i) { // Current cobination is ready, print it if (index == C) { for (int j = 0; j < C; j++) { N->seeds[qtd * C + j] = data[j]; } qtd++; return; } // When there are no more elements to put in data[] if (i >= SIZE) return; //current is inserted; put next at a next location data[index] = i; combineUntil(index + 1, data, i + 1); //current is deleted; replace it with next combineUntil(index, data, i + 1); } void combine() { vector<int> data(C); /* print all combinations of size 'r' using a temporary array 'data' */ combineUntil(0, data, 0); } void initialize() { for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { R[i * SIZE + j] = R[j * SIZE + i] = -1; } } } void readInput() { int x; cin >> SIZE; PERM = bib[SIZE - 1]; N = (Node*)malloc(sizeof(Node)); N->sz = SIZE; for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { cin >> x; N->graph[i * SIZE + j] = x; N->graph[j * SIZE + i] = x; } } } /* Define the number of permutations and blocks */ void sizeDefinitions() { for (int i = 4; i <= MAX; i++) { int resp = 1; for (int j = i - 3; j <= i; j++) resp *= j; resp /= 24; bib[i - 1] = resp; } } int main(int argv, char** argc) { ios::sync_with_stdio(false); sizeDefinitions(); /* Read the input, which is given by a size of a graph and its weighted edges. The given graph is dense. */ readInput(); initialize(); /* Given the number of vertices, generate multiple 4-clique seeds. */ combine(); if (argv == 2) { cudaSetDevice(atoi(argc[1])); } else if (argv == 3) { GPU_CNT = atoi(argc[2]); int d; cudaGetDeviceCount(&d); if (GPU_CNT > d) GPU_CNT = d; } double start = getTime(); int respMax = tmfgPrepare(); double stop = getTime(); /* Reconstruct the graph given the faces of the graph */ for (int i = 0; i < 2 * SIZE; i++) { int va = F[i * 3], vb = F[i * 3 + 1], vc = F[i * 3 + 2]; if (va == vb && vb == vc) continue; R[va * SIZE + vb] = R[vb * SIZE + va] = N->graph[va * SIZE + vb]; R[va * SIZE + vc] = R[vc * SIZE + va] = N->graph[va * SIZE + vc]; R[vb * SIZE + vc] = R[vc * SIZE + vb] = N->graph[vb * SIZE + vc]; } cout << "Printing generated graph: " << endl; for (int i = 0; i < SIZE - 1; i++) { for (int j = i + 1; j < SIZE; j++) { cout << R[i * SIZE + j] << " "; } cout << endl; } cout << endl; printElapsedTime(start, stop); cout << "Maximum weight found: " << respMax << endl; free(N); return 0; }
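One number worth keeping in mind when reading the pair above: sizeDefinitions() stores C(i, 4) in bib[i-1], so PERM = bib[SIZE-1] is the count of 4-clique seeds that combine() enumerates and that the kernels sweep over. A quick host-side check of that formula, with choose4 as an illustrative helper rather than code from the file:

#include <cstdio>

static long long choose4(long long n) {          // C(n, 4), the same product/24 as sizeDefinitions()
    return n * (n - 1) * (n - 2) * (n - 3) / 24;
}

int main() {
    // 10 vertices give 210 seeds; MAX = 110 gives the PERM_MAX bound used to size the seeds array
    printf("%lld %lld\n", choose4(10), choose4(110));
    return 0;
}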
76aa21bfd807b9cfe1f210ad3ec661f9aa08f550.hip
// !!! This is a file automatically generated by hipify!!! #ifndef TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_ #define TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_ // #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "deform_conv.h" #include "hip/hip_runtime.h" #include "tensorflow/core/util/cuda_kernel_helper.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/logging.h" #include <algorithm> #include <cstring> #include <vector> #include <stdio.h> namespace tensorflow { typedef Eigen::GpuDevice GPUDevice; typedef std::vector<int32> TShape; template <typename DType> __device__ DType deform_im2col_bilinear(const DType* thread_input_ptr, const int input_width, const int thread_input_h_left, // How many grids left in height const int thread_input_w_left, // How many grids left in width DType kernel_height_loc, // height w.r.t kernel start height DType kernel_width_loc) { // width w.r.t kernel start width int height_low = floor(kernel_height_loc); int width_low = floor(kernel_width_loc); int height_high; int width_high; if (height_low >= thread_input_h_left - 1) { height_high = height_low = thread_input_h_left - 1; kernel_height_loc = (DType)height_low; } else { height_high = height_low + 1; } if (width_low >= thread_input_w_left - 1) { width_high = width_low = thread_input_w_left - 1; kernel_width_loc = (DType)width_low; } else { width_high = width_low + 1; } DType height_low_dist = kernel_height_loc - height_low; DType width_low_dist = kernel_width_loc - width_low; DType height_high_dist = 1 - height_low_dist; DType width_high_dist = 1 - width_low_dist; // --------- // | 1 | 2 | // --------- // | 3 | 4 | // --------- DType v1 = thread_input_ptr[height_low * input_width + width_low]; DType v2 = thread_input_ptr[height_low * input_width + width_high]; DType v3 = thread_input_ptr[height_high * input_width + width_low]; DType v4 = thread_input_ptr[height_high * input_width + width_high]; DType w1 = height_high_dist * width_high_dist; DType w2 = height_high_dist * width_low_dist; DType w3 = height_low_dist * width_high_dist; DType w4 = height_low_dist * width_low_dist; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } // template <typename DType> // __device__ DType deform_im2col_bilinear(const DType* bottom_data, const int data_width, // const int height, const int width, DType h, DType w) { // int h_low = floor(h); // int w_low = floor(w); // int h_high; // int w_high; // if (h_low >= height - 1) { // h_high = h_low = height - 1; // h = (DType)h_low; // } // else { // h_high = h_low + 1; // } // if (w_low >= width - 1) { // w_high = w_low = width - 1; // w = (DType)w_low; // } // else { // w_high = w_low + 1; // } // DType lh = h - h_low; // DType lw = w - w_low; // DType hh = 1 - lh, hw = 1 - lw; // DType v1 = bottom_data[h_low * data_width + w_low]; // DType v2 = bottom_data[h_low * data_width + w_high]; // DType v3 = bottom_data[h_high * data_width + w_low]; // DType v4 = bottom_data[h_high * data_width + w_high]; // DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; // DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); // return val; // } template <typename DType> __global__ void deform_im2col_2d_gpu_kernel( const int num_kernels_per_filter, const DType* batch_input_ptr, const DType* batch_offset_ptr, const int input_height, const int input_width, const int kernel_height, 
const int kernel_width, const int pad_height, const int pad_width, const int stride_height, const int stride_width, const int dilation_height, const int dilation_width, const int channel_per_deform_group, const int output_height, const int output_width, DType* col_buf_3d_flatten_ptr) { CUDA_1D_KERNEL_LOOP(index, num_kernels_per_filter) { // index index of output matrix const int thread_output_w = index % output_width; const int thread_output_h = (index / output_width) % output_height; const int thread_channel = (index / output_width) / output_height; const int thread_filter_init_loc = thread_channel * kernel_height * kernel_width; // compute deformable group index const int deform_group_idx = thread_channel / channel_per_deform_group; const int thread_input_h = thread_output_h * stride_height - pad_height; const int thread_input_w = thread_output_w * stride_width - pad_width; // printf("%d, %d\n", pad_height, pad_width); const int thread_input_h_left = input_height - thread_input_h; const int thread_input_w_left = input_width - thread_input_w; DType* current_data_col_ptr = col_buf_3d_flatten_ptr + thread_filter_init_loc * output_height * output_width + thread_output_h * output_width + thread_output_w; const DType* thread_input_ptr = batch_input_ptr + thread_channel * input_height * input_width + thread_input_h * input_width + thread_input_w; const DType* thread_offset_ptr = batch_offset_ptr + 2 * deform_group_idx * kernel_height * kernel_width * output_height * output_width; // offset -> [2 * deform_group * kernel_width * kernel_height, output_height, output_width] for (int i = 0; i < kernel_height; i++) { for (int j = 0; j < kernel_width; j++) { const int offset_h_ptr = 2 * (i * kernel_width + j) * output_height * output_width + thread_output_h * output_width + thread_output_w; const int offset_w_ptr = 2 * (i * kernel_width + j) * output_height * output_width + output_height * output_width + thread_output_h * output_width + thread_output_w; const DType offset_h = thread_offset_ptr[offset_h_ptr]; const DType offset_w = thread_offset_ptr[offset_w_ptr]; // The datatype of following variables need to be changed into <Dtype> if the deformable conv is activated. 
const DType current_input_h = thread_input_h + i * dilation_height + offset_h; const DType current_input_w = thread_input_w + j * dilation_width + offset_w; DType val = static_cast<DType>(0); if (current_input_h >= 0 && current_input_w >= 0 && current_input_h < input_height && current_input_w < input_width) { const DType kernel_height_loc = i * dilation_height + offset_h; const DType kernel_width_loc = j * dilation_width + offset_w; val = deform_im2col_bilinear(thread_input_ptr, input_width, thread_input_h_left, thread_input_w_left, kernel_height_loc, kernel_width_loc); } *current_data_col_ptr = val; current_data_col_ptr += output_height * output_width; } } } } namespace functor { inline int ProdShape(const TShape & shape, int start); template <typename DType> struct deform_im2col_2d<GPUDevice, DType> { void operator()(const GPUDevice& d, // 0 -> device const DType* batch_input_ptr, // 1 -> input data start pointer, ranging according to n const DType* batch_offset_ptr, const TShape& input_shape, // 2 -> input shape = [N, C, H, W] const TShape& col_buf_shape, // 3 -> shape = [filter_3d_flatten, output_rows, output_cols] const TShape& kernel_2d_shape, // 4 -> kernel 2D shape const TShape& pad_2d_shape, // 5 -> padding 2D shape const TShape& stride_2d_shape, // 6 -> stride 2D shape const TShape& dilation_2d_shape, const int deform_group, DType* col_buf_3d_flatten_ptr) { // 7 -> flatten col_buf_3d, shape = [1 * filter_3d_flatten_dim_ * output_2d_flatten_dim_] // num_axes should be smaller than block size int num_spatial_axes = kernel_2d_shape.size(); int channel_per_deform_group = input_shape[1] / deform_group; int num_kernels_per_filter = input_shape[1] * ProdShape(col_buf_shape, 1); CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels_per_filter, d); CHECK_LT(num_spatial_axes, config.thread_per_block); switch (num_spatial_axes) { case 2: hipLaunchKernelGGL(( deform_im2col_2d_gpu_kernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream(), num_kernels_per_filter, batch_input_ptr, batch_offset_ptr, input_shape[2], input_shape[3], kernel_2d_shape[0], kernel_2d_shape[1], pad_2d_shape[0], pad_2d_shape[1], stride_2d_shape[0], stride_2d_shape[1], dilation_2d_shape[0], dilation_2d_shape[1], channel_per_deform_group, col_buf_shape[1], col_buf_shape[2], col_buf_3d_flatten_ptr); break; default: LOG(FATAL) << "im2col_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } } }; inline int ProdShape(const TShape &shape, int start) { int64 res = 1; for(int i=start; i<shape.size(); i++) { res*=shape[i]; } return res; } } #define DECLARE_GPU_SPEC(DType) \ template struct functor::deform_im2col_2d<GPUDevice, DType>; // extern template struct Copy<GPUDevice, T>; TF_CALL_float(DECLARE_GPU_SPEC); TF_CALL_double(DECLARE_GPU_SPEC); // TF_CALL_half(DECLARE_GPU_SPEC); // TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC); #undef DECLARE_GPU_SPEC } #endif // TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
76aa21bfd807b9cfe1f210ad3ec661f9aa08f550.cu
#ifndef TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_ #define TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_ // #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "deform_conv.h" #include "cuda.h" #include "tensorflow/core/util/cuda_kernel_helper.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/logging.h" #include <algorithm> #include <cstring> #include <vector> #include <stdio.h> namespace tensorflow { typedef Eigen::GpuDevice GPUDevice; typedef std::vector<int32> TShape; template <typename DType> __device__ DType deform_im2col_bilinear(const DType* thread_input_ptr, const int input_width, const int thread_input_h_left, // How many grids left in height const int thread_input_w_left, // How many grids left in width DType kernel_height_loc, // height w.r.t kernel start height DType kernel_width_loc) { // width w.r.t kernel start width int height_low = floor(kernel_height_loc); int width_low = floor(kernel_width_loc); int height_high; int width_high; if (height_low >= thread_input_h_left - 1) { height_high = height_low = thread_input_h_left - 1; kernel_height_loc = (DType)height_low; } else { height_high = height_low + 1; } if (width_low >= thread_input_w_left - 1) { width_high = width_low = thread_input_w_left - 1; kernel_width_loc = (DType)width_low; } else { width_high = width_low + 1; } DType height_low_dist = kernel_height_loc - height_low; DType width_low_dist = kernel_width_loc - width_low; DType height_high_dist = 1 - height_low_dist; DType width_high_dist = 1 - width_low_dist; // --------- // | 1 | 2 | // --------- // | 3 | 4 | // --------- DType v1 = thread_input_ptr[height_low * input_width + width_low]; DType v2 = thread_input_ptr[height_low * input_width + width_high]; DType v3 = thread_input_ptr[height_high * input_width + width_low]; DType v4 = thread_input_ptr[height_high * input_width + width_high]; DType w1 = height_high_dist * width_high_dist; DType w2 = height_high_dist * width_low_dist; DType w3 = height_low_dist * width_high_dist; DType w4 = height_low_dist * width_low_dist; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } // template <typename DType> // __device__ DType deform_im2col_bilinear(const DType* bottom_data, const int data_width, // const int height, const int width, DType h, DType w) { // int h_low = floor(h); // int w_low = floor(w); // int h_high; // int w_high; // if (h_low >= height - 1) { // h_high = h_low = height - 1; // h = (DType)h_low; // } // else { // h_high = h_low + 1; // } // if (w_low >= width - 1) { // w_high = w_low = width - 1; // w = (DType)w_low; // } // else { // w_high = w_low + 1; // } // DType lh = h - h_low; // DType lw = w - w_low; // DType hh = 1 - lh, hw = 1 - lw; // DType v1 = bottom_data[h_low * data_width + w_low]; // DType v2 = bottom_data[h_low * data_width + w_high]; // DType v3 = bottom_data[h_high * data_width + w_low]; // DType v4 = bottom_data[h_high * data_width + w_high]; // DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; // DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); // return val; // } template <typename DType> __global__ void deform_im2col_2d_gpu_kernel( const int num_kernels_per_filter, const DType* batch_input_ptr, const DType* batch_offset_ptr, const int input_height, const int input_width, const int kernel_height, const int kernel_width, const int pad_height, const int pad_width, const 
int stride_height, const int stride_width, const int dilation_height, const int dilation_width, const int channel_per_deform_group, const int output_height, const int output_width, DType* col_buf_3d_flatten_ptr) { CUDA_1D_KERNEL_LOOP(index, num_kernels_per_filter) { // index index of output matrix const int thread_output_w = index % output_width; const int thread_output_h = (index / output_width) % output_height; const int thread_channel = (index / output_width) / output_height; const int thread_filter_init_loc = thread_channel * kernel_height * kernel_width; // compute deformable group index const int deform_group_idx = thread_channel / channel_per_deform_group; const int thread_input_h = thread_output_h * stride_height - pad_height; const int thread_input_w = thread_output_w * stride_width - pad_width; // printf("%d, %d\n", pad_height, pad_width); const int thread_input_h_left = input_height - thread_input_h; const int thread_input_w_left = input_width - thread_input_w; DType* current_data_col_ptr = col_buf_3d_flatten_ptr + thread_filter_init_loc * output_height * output_width + thread_output_h * output_width + thread_output_w; const DType* thread_input_ptr = batch_input_ptr + thread_channel * input_height * input_width + thread_input_h * input_width + thread_input_w; const DType* thread_offset_ptr = batch_offset_ptr + 2 * deform_group_idx * kernel_height * kernel_width * output_height * output_width; // offset -> [2 * deform_group * kernel_width * kernel_height, output_height, output_width] for (int i = 0; i < kernel_height; i++) { for (int j = 0; j < kernel_width; j++) { const int offset_h_ptr = 2 * (i * kernel_width + j) * output_height * output_width + thread_output_h * output_width + thread_output_w; const int offset_w_ptr = 2 * (i * kernel_width + j) * output_height * output_width + output_height * output_width + thread_output_h * output_width + thread_output_w; const DType offset_h = thread_offset_ptr[offset_h_ptr]; const DType offset_w = thread_offset_ptr[offset_w_ptr]; // The datatype of following variables need to be changed into <Dtype> if the deformable conv is activated. 
const DType current_input_h = thread_input_h + i * dilation_height + offset_h; const DType current_input_w = thread_input_w + j * dilation_width + offset_w; DType val = static_cast<DType>(0); if (current_input_h >= 0 && current_input_w >= 0 && current_input_h < input_height && current_input_w < input_width) { const DType kernel_height_loc = i * dilation_height + offset_h; const DType kernel_width_loc = j * dilation_width + offset_w; val = deform_im2col_bilinear(thread_input_ptr, input_width, thread_input_h_left, thread_input_w_left, kernel_height_loc, kernel_width_loc); } *current_data_col_ptr = val; current_data_col_ptr += output_height * output_width; } } } } namespace functor { inline int ProdShape(const TShape & shape, int start); template <typename DType> struct deform_im2col_2d<GPUDevice, DType> { void operator()(const GPUDevice& d, // 0 -> device const DType* batch_input_ptr, // 1 -> input data start pointer, ranging according to n const DType* batch_offset_ptr, const TShape& input_shape, // 2 -> input shape = [N, C, H, W] const TShape& col_buf_shape, // 3 -> shape = [filter_3d_flatten, output_rows, output_cols] const TShape& kernel_2d_shape, // 4 -> kernel 2D shape const TShape& pad_2d_shape, // 5 -> padding 2D shape const TShape& stride_2d_shape, // 6 -> stride 2D shape const TShape& dilation_2d_shape, const int deform_group, DType* col_buf_3d_flatten_ptr) { // 7 -> flatten col_buf_3d, shape = [1 * filter_3d_flatten_dim_ * output_2d_flatten_dim_] // num_axes should be smaller than block size int num_spatial_axes = kernel_2d_shape.size(); int channel_per_deform_group = input_shape[1] / deform_group; int num_kernels_per_filter = input_shape[1] * ProdShape(col_buf_shape, 1); CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels_per_filter, d); CHECK_LT(num_spatial_axes, config.thread_per_block); switch (num_spatial_axes) { case 2: deform_im2col_2d_gpu_kernel<DType> <<<config.block_count, config.thread_per_block, 0, d.stream()>>>( num_kernels_per_filter, batch_input_ptr, batch_offset_ptr, input_shape[2], input_shape[3], kernel_2d_shape[0], kernel_2d_shape[1], pad_2d_shape[0], pad_2d_shape[1], stride_2d_shape[0], stride_2d_shape[1], dilation_2d_shape[0], dilation_2d_shape[1], channel_per_deform_group, col_buf_shape[1], col_buf_shape[2], col_buf_3d_flatten_ptr); break; default: LOG(FATAL) << "im2col_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } } }; inline int ProdShape(const TShape &shape, int start) { int64 res = 1; for(int i=start; i<shape.size(); i++) { res*=shape[i]; } return res; } } #define DECLARE_GPU_SPEC(DType) \ template struct functor::deform_im2col_2d<GPUDevice, DType>; // extern template struct Copy<GPUDevice, T>; TF_CALL_float(DECLARE_GPU_SPEC); TF_CALL_double(DECLARE_GPU_SPEC); // TF_CALL_half(DECLARE_GPU_SPEC); // TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC); #undef DECLARE_GPU_SPEC } #endif // TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
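The core of deform_im2col_bilinear above is a standard bilinear blend: the fractional parts of the sampling location become the weights of the four surrounding grid values, and the four weights always sum to one. A scalar host-side sketch of that weighting, with bilinear as an illustrative helper and the sample point assumed to sit strictly inside its four neighbours:

#include <cstdio>

static float bilinear(float v1, float v2, float v3, float v4, float h, float w) {
    // v1..v4 are the values at (low,low), (low,high), (high,low), (high,high);
    // lh/lw are the fractional offsets of the sample point inside that cell
    float lh = h - (int)h, lw = w - (int)w;
    float hh = 1.0f - lh, hw = 1.0f - lw;
    return (hh * hw) * v1 + (hh * lw) * v2 + (lh * hw) * v3 + (lh * lw) * v4;
}

int main() {
    // at (0.25, 0.5) the weights are 0.375, 0.375, 0.125, 0.125,
    // so corner values 1, 2, 3, 4 blend to 2.0
    printf("%f\n", bilinear(1.f, 2.f, 3.f, 4.f, 0.25f, 0.5f));
    return 0;
}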
8ffb1ae1172ecb3e6bf1ead4ad3dfacbdb1d5189.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> using namespace std; //DEVICE __global__ void kernelVector_x_constant( float* arr, int n, int k ) { //Get the index of the physical thread int idx = blockIdx.x * blockDim.x + threadIdx.x; //As long as the thread is valid for the operation if( idx<n ) { //Multiply the element by the constant arr[ idx ] = arr[ idx ] * k; } } //HOST int main() { int size = 1000000; //Allocate memory in the HOST RAM float* arr = new float[size]; float* arr_DEVICE = NULL; //Initialize the array on the HOST for( int index = 0; index<size ; index++ ) { arr[index] = index; } //Allocate memory in the DEVICE RAM ( the same number of bytes as on the HOST ) hipMalloc((void**)&arr_DEVICE, size * sizeof(float)); //Copy the memory block from the HOST to the DEVICE hipMemcpy( arr_DEVICE, arr, size * sizeof(float), hipMemcpyHostToDevice); ///////////////////////// RUN THE CUDA KERNEL //////////////////////////// //////// 512 threads //////// ceil(1000000/512) blocks hipLaunchKernelGGL(( kernelVector_x_constant), dim3(ceil(size/512.0)), dim3(512) , 0, 0, arr_DEVICE, size, 65 ); //Force a synchronous call hipDeviceSynchronize(); //Copy the processed data back to the HOST RAM hipMemcpy( arr, arr_DEVICE, size * sizeof(float), hipMemcpyDeviceToHost); //Print the first 100 elements to check the result for( int index = 0; index<100 ; index++ ) { cout<<arr[index]<<endl; } //Free the DEVICE RAM hipFree( arr_DEVICE ); //Free the HOST RAM delete[] arr; cin.get(); }
8ffb1ae1172ecb3e6bf1ead4ad3dfacbdb1d5189.cu
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> using namespace std; //DEVICE __global__ void kernelVector_x_constant( float* arr, int n, int k ) { //Get the index of the physical thread int idx = blockIdx.x * blockDim.x + threadIdx.x; //As long as the thread is valid for the operation if( idx<n ) { //Multiply the element by the constant arr[ idx ] = arr[ idx ] * k; } } //HOST int main() { int size = 1000000; //Allocate memory in the HOST RAM float* arr = new float[size]; float* arr_DEVICE = NULL; //Initialize the array on the HOST for( int index = 0; index<size ; index++ ) { arr[index] = index; } //Allocate memory in the DEVICE RAM ( the same number of bytes as on the HOST ) cudaMalloc((void**)&arr_DEVICE, size * sizeof(float)); //Copy the memory block from the HOST to the DEVICE cudaMemcpy( arr_DEVICE, arr, size * sizeof(float), cudaMemcpyHostToDevice); ///////////////////////// RUN THE CUDA KERNEL //////////////////////////// //////// 512 threads //////// ceil(1000000/512) blocks kernelVector_x_constant<<< ceil(size/512.0), 512 >>>( arr_DEVICE, size, 65 ); //Force a synchronous call cudaThreadSynchronize(); //Copy the processed data back to the HOST RAM cudaMemcpy( arr, arr_DEVICE, size * sizeof(float), cudaMemcpyDeviceToHost); //Print the first 100 elements to check the result for( int index = 0; index<100 ; index++ ) { cout<<arr[index]<<endl; } //Free the DEVICE RAM cudaFree( arr_DEVICE ); //Free the HOST RAM delete[] arr; cin.get(); }
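The launch in the pair above rounds the block count up with a floating-point ceil(size/512.0); an equivalent integer-only idiom that avoids the double conversion is shown below, with illustrative names only.

#include <cstdio>

int main() {
    int size = 1000000, threads = 512;
    int blocks = (size + threads - 1) / threads;   // integer ceiling of size / threads
    printf("%d blocks of %d threads cover %d elements\n", blocks, threads, size);   // 1954 blocks
    return 0;
}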
0cbf16618e9af4c9f3b4c5b3bee2ddebd65f3e49.hip
// !!! This is a file automatically generated by hipify!!! hipMalloc((void**)&A_device, size) // Allocate Memory on Device hipMemcpy(A_device, A_host, size, hipMemcpyHostToDevice) // Initialize A with the Data from Host hipMemcpy(A_host, A_device, size, hipMemcpyDeviceToHost) // Copy the Resultant Data back to Host hipFree(A_device) // Free Device Memory
0cbf16618e9af4c9f3b4c5b3bee2ddebd65f3e49.cu
cudaMalloc((void**)&A_device, size) // Allocate Memory on Device cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice) // Initialize A with the Data from Host cudaMemcpy(A_host, A_device, size, cudaMemcpyDeviceToHost) // Copy the Resultant Data back to Host cudaFree(A_device) // Free Device Memory
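The four-line note above is pseudocode for the usual allocate / copy in / copy out / free sequence. A minimal runnable version of the same pattern is sketched below, assuming a small float array, no kernel launch in between, and no error checking; all names are illustrative.

#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int N = 16;
    const size_t bytes = N * sizeof(float);
    float A_host[N];
    for (int i = 0; i < N; i++) A_host[i] = (float)i;

    float* A_device = NULL;
    cudaMalloc((void**)&A_device, bytes);                          // Allocate Memory on Device
    cudaMemcpy(A_device, A_host, bytes, cudaMemcpyHostToDevice);   // Initialize A with the Data from Host
    // ... a kernel would normally run on A_device here ...
    cudaMemcpy(A_host, A_device, bytes, cudaMemcpyDeviceToHost);   // Copy the Resultant Data back to Host
    cudaFree(A_device);                                            // Free Device Memory

    printf("A_host[5] = %f\n", A_host[5]);
    return 0;
}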
09e4f93e0708d1b749bb17dbc51173b99e60f995.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #define L 114 static int AREA = L*L; static int NTOT = L*L - (4*L -4); // #define T 6. // #define T 0.1 // #define T 2.26918531421 #define T_CYCLE_START 1.5 #define T_CYCLE_END 3 #define T_CYCLE_STEP 0.04 #define SINGLETEMP 3.0 int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP); #define J 1. #define SEED 1000 // print history true/false #define HISTORY 1 struct measure_plan { int steps_repeat; int t_max_sim; int t_measure_wait; int t_measure_interval; } static PLAN = { .steps_repeat = 100, .t_max_sim = 250, .t_measure_wait = 50, .t_measure_interval = 10 }; // average tracker struct struct avg_tr { double sum; double sum_squares; int n; }; struct avg_tr new_avg_tr(int locn) { struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn}; return a; } void update_avg(struct avg_tr * tr_p, double newval) { tr_p->sum += newval; tr_p->sum_squares += (newval*newval); } double average( struct avg_tr tr) { return (tr.sum)/((double) tr.n) ; } double stdev( struct avg_tr tr) { return sqrt( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) ); } double variance( struct avg_tr tr) { return ( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) ); } double unitrand(){ return (double)rand() / (double)RAND_MAX; } void init_random(char grid[L][L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x][y] = rand() & 1; } } } void init_t0(char grid[L][L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { grid[x][y] = 0; } } } // can segfault char grid_step(char grid[L][L], int x, int y, int xstep, int ystep) { return grid[x+xstep][y+ystep]; } // segfault if applied to an edge spin, must be called only on the inner L-1 grid // *2 -4 remaps {0,1} into {-1,1} char deltaH(char grid[L][L], int x, int y) { char s0 = grid[x][y]; char j1 = s0 ^ grid_step(grid, x, y, 1, 0); char j2 = s0 ^ grid_step(grid, x, y, -1, 0); char j3 = s0 ^ grid_step(grid, x, y, 0, 1); char j4 = s0 ^ grid_step(grid, x, y, 0, -1); return -((j1 + j2 + j3 + j4) *2 -4)*2*J; } void flip(char grid[L][L], int x, int y) { grid[x][y] = !grid[x][y]; } void update_spin(char grid[L][L], int x, int y, double temperature) { double dh = (double) deltaH(grid, x, y); // printf("dh: %f \n", dh); double p = exp( -dh / temperature); double ur = unitrand(); //CHANGE // printf("p: %f, unitrand: %f \n", p, ur); if(ur < p ) { flip(grid, x, y); } } void update_grid_white(char grid[L][L], double temperature) { for(int x = 1; x<L-1; x+=1) { for(int y = (1 + x%2) ; y<L-1; y+=2) { update_spin(grid, x, y, temperature); } } } void update_grid_black(char grid[L][L], double temperature) { for(int x = 1; x<L-1; x+=1) { for(int y = (1 + (x+1)%2) ; y<L-1; y+=2) { update_spin(grid, x, y, temperature); } } } void dump(char grid[L][L]) { for(int x = 0; x<L; x++) { for(int y = 0; y<L; y++) { // if(grid[x][y] == 0) printf("•"); // else printf("◘"); if(grid[x][y] == 0) printf(" "); else printf("█"); // printf("%i", grid[x][y]); } printf("\n"); } printf("\n"); } double measure_m(char grid[L][L]) { int m = 0; for(int x = 1; x<L-1; x++) { for(int y = 1; y<L-1; y++) { m += (grid[x][y]*2.
-1.); // printf("x %i m %f \n", x, grid[x][y] ); } } return (((double) m ) / (double) NTOT) ; } void measure_cycle(char startgrid[L][L], struct measure_plan pl, FILE *resf, double temperature) { char grid[L][L]; //OUTER REP LOOP double n_measures_per_sim = (double) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval); struct avg_tr avg_of_all_sims_tr = new_avg_tr(pl.steps_repeat); float avg_of_sims = 0; for( int krep=0; krep< pl.steps_repeat; krep++) { srand(SEED + krep); memcpy(grid, startgrid, L*L*sizeof(char) ); // INNER SIM LOOPS if(HISTORY) printf("# simulation %i\n", krep+1); if(HISTORY) printf("# waiting thermalization for the first %i sim steps\n", pl.t_measure_wait); int ksim=0; for( ; ksim<pl.t_measure_wait; ksim++) { update_grid_black(grid, temperature); update_grid_white(grid, temperature); if( ksim % pl.t_measure_interval == 0) { // print all history if(HISTORY) printf("%i %f\n", ksim, measure_m(grid)); } } if(HISTORY) printf("# end thermalization\n"); struct avg_tr sim_avg_tr = new_avg_tr(n_measures_per_sim); for( ; ksim<pl.t_max_sim; ksim++) { update_grid_black(grid, temperature); update_grid_white(grid, temperature); if( ksim % pl.t_measure_interval == 0) { double locres = measure_m(grid); // print all history if(HISTORY) printf("%i %f\n", ksim, locres); update_avg(&sim_avg_tr, locres); } } // END INNER SIM LOOPS if(HISTORY) printf("# end simulation %i\n", krep+1); if(HISTORY) printf("# average for simulation %i: %f +- %f \n", krep+1, average(sim_avg_tr), stdev(sim_avg_tr)); update_avg(&avg_of_all_sims_tr, average(sim_avg_tr)); } // END OUTER REP LOOP fprintf(resf, "%f ", temperature); fprintf(resf, "%f ", average(avg_of_all_sims_tr)); fprintf(resf, "%f\n", stdev(avg_of_all_sims_tr)); // fprintf(resf, "\n\n"); if(HISTORY) dump(grid); } int main() { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); FILE *resf = fopen("results.txt", "w"); fprintf(resf, "# cpu1\n"); fprintf(resf, "# parameters:\n# linear_size: %i\n", L); fprintf(resf, "#temp_start: %f\n# coupling: %f\n# repetitions: %i\n", 0., J, PLAN.steps_repeat); fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED); fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT); fprintf(resf, "\n"); fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n"); srand(SEED); char startgrid[L][L]; init_t0(startgrid); // dump(startgrid); // cycle for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) { measure_cycle(startgrid, PLAN, resf, kt); } // just one // measure_cycle(startgrid, PLAN, resf, SINGLETEMP); hipEventRecord(stop); hipEventSynchronize(stop); float total_time = 0; hipEventElapsedTime(&total_time, start, stop); FILE *timef = fopen("time.txt", "w"); long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT)); fprintf(timef, "# cpu1\n"); fprintf(timef, "# total execution time (milliseconds):\n"); fprintf(timef, "%f\n", total_time); fprintf(timef, "# total spin flips performed:\n"); fprintf(timef, "%li\n", total_flips); fprintf(timef, "# average spin flips per millisecond:\n"); fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) ); fclose(timef); fclose(resf); }
09e4f93e0708d1b749bb17dbc51173b99e60f995.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#define L 114
static int AREA = L*L;
static int NTOT = L*L - (4*L -4);
// #define T 6.
// #define T 0.1
// #define T 2.26918531421
#define T_CYCLE_START 1.5
#define T_CYCLE_END 3
#define T_CYCLE_STEP 0.04
#define SINGLETEMP 3.0
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
// print history true/false
#define HISTORY 1

struct measure_plan {
    int steps_repeat;
    int t_max_sim;
    int t_measure_wait;
    int t_measure_interval;
} static PLAN = {
    .steps_repeat = 100,
    .t_max_sim = 250,
    .t_measure_wait = 50,
    .t_measure_interval = 10
};

// average tracker struct
struct avg_tr {
    double sum;
    double sum_squares;
    int n;
};
struct avg_tr new_avg_tr(int locn) {
    struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
    return a;
}
void update_avg(struct avg_tr * tr_p, double newval) {
    tr_p->sum += newval;
    tr_p->sum_squares += (newval*newval);
}
double average( struct avg_tr tr) {
    return (tr.sum)/((double) tr.n) ;
}
double stdev( struct avg_tr tr) {
    return sqrt( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
}
double variance( struct avg_tr tr) {
    return ( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
}

double unitrand(){
    return (double)rand() / (double)RAND_MAX;
}

void init_random(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x][y] = rand() & 1;
        }
    }
}
void init_t0(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x][y] = 0;
        }
    }
}

// can segfault
char grid_step(char grid[L][L], int x, int y, int xstep, int ystep) {
    return grid[x+xstep][y+ystep];
}

// segfault if applied to an edge spin, must be called only on the inner L-1 grid
// *2 -4 remaps {0,1} into {-1,1}
char deltaH(char grid[L][L], int x, int y) {
    char s0 = grid[x][y];
    char j1 = s0 ^ grid_step(grid, x, y, 1, 0);
    char j2 = s0 ^ grid_step(grid, x, y, -1, 0);
    char j3 = s0 ^ grid_step(grid, x, y, 0, 1);
    char j4 = s0 ^ grid_step(grid, x, y, 0, -1);
    return -((j1 + j2 + j3 + j4) *2 -4)*2*J;
}

void flip(char grid[L][L], int x, int y) {
    grid[x][y] = !grid[x][y];
}

void update_spin(char grid[L][L], int x, int y, double temperature) {
    double dh = (double) deltaH(grid, x, y);
    // printf("dh: %f \n", dh);
    double p = exp( -dh / temperature);
    double ur = unitrand(); //CHANGE
    // printf("p: %f, unitrand: %f \n", p, ur);
    if(ur < p ) {
        flip(grid, x, y);
    }
}

void update_grid_white(char grid[L][L], double temperature) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + x%2) ; y<L-1; y+=2) {
            update_spin(grid, x, y, temperature);
        }
    }
}
void update_grid_black(char grid[L][L], double temperature) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + (x+1)%2) ; y<L-1; y+=2) {
            update_spin(grid, x, y, temperature);
        }
    }
}

void dump(char grid[L][L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            // if(grid[x][y] == 0) printf("•");
            // else printf("◘");
            if(grid[x][y] == 0) printf(" ");
            else printf("█");
            // printf("%i", grid[x][y]);
        }
        printf("\n");
    }
    printf("\n");
}

double measure_m(char grid[L][L]) {
    int m = 0;
    for(int x = 1; x<L-1; x++) {
        for(int y = 1; y<L-1; y++) {
            m += (grid[x][y]*2. -1.);
            // printf("x %i m %f \n", x, grid[x][y] );
        }
    }
    return (((double) m ) / (double) NTOT) ;
}

void measure_cycle(char startgrid[L][L], struct measure_plan pl, FILE *resf, double temperature) {
    char grid[L][L];

    //OUTER REP LOOP
    double n_measures_per_sim = (double) ((pl.t_max_sim - pl.t_measure_wait)/pl.t_measure_interval);
    struct avg_tr avg_of_all_sims_tr = new_avg_tr(pl.steps_repeat);
    float avg_of_sims = 0;
    for( int krep=0; krep< pl.steps_repeat; krep++) {
        srand(SEED + krep);
        memcpy(grid, startgrid, L*L*sizeof(char) );

        // INNER SIM LOOPS
        if(HISTORY) printf("# simulation %i\n", krep+1);
        if(HISTORY) printf("# waiting thermalization for the first %i sim steps\n", pl.t_measure_wait);
        int ksim=0;
        for( ; ksim<pl.t_measure_wait; ksim++) {
            update_grid_black(grid, temperature);
            update_grid_white(grid, temperature);
            if( ksim % pl.t_measure_interval == 0) {
                // print all history
                if(HISTORY) printf("%i %f\n", ksim, measure_m(grid));
            }
        }
        if(HISTORY) printf("# end thermalization\n");

        struct avg_tr sim_avg_tr = new_avg_tr(n_measures_per_sim);
        for( ; ksim<pl.t_max_sim; ksim++) {
            update_grid_black(grid, temperature);
            update_grid_white(grid, temperature);
            if( ksim % pl.t_measure_interval == 0) {
                double locres = measure_m(grid);
                // print all history
                if(HISTORY) printf("%i %f\n", ksim, locres);
                update_avg(&sim_avg_tr, locres);
            }
        }
        // END INNER SIM LOOPS
        if(HISTORY) printf("# end simulation %i\n", krep+1);
        if(HISTORY) printf("# average for simulation %i: %f +- %f \n", krep+1, average(sim_avg_tr), stdev(sim_avg_tr));
        update_avg(&avg_of_all_sims_tr, average(sim_avg_tr));
    }
    // END OUTER REP LOOP

    fprintf(resf, "%f ", temperature);
    fprintf(resf, "%f ", average(avg_of_all_sims_tr));
    fprintf(resf, "%f\n", stdev(avg_of_all_sims_tr));
    // fprintf(resf, "\n\n");

    if(HISTORY) dump(grid);
}

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    FILE *resf = fopen("results.txt", "w");
    fprintf(resf, "# cpu1\n");
    fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
    fprintf(resf, "#temp_start: %f\n# coupling: %f\n# repetitions: %i\n", 0., J, PLAN.steps_repeat);
    fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", PLAN.t_max_sim, PLAN.t_measure_wait, PLAN.t_measure_interval, SEED);
    fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
    fprintf(resf, "\n");
    fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");

    srand(SEED);
    char startgrid[L][L];
    init_t0(startgrid);
    // dump(startgrid);

    // cycle
    for( double kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
        measure_cycle(startgrid, PLAN, resf, kt);
    }
    // just one
    // measure_cycle(startgrid, PLAN, resf, SINGLETEMP);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float total_time = 0;
    cudaEventElapsedTime(&total_time, start, stop);

    FILE *timef = fopen("time.txt", "w");
    long int total_flips = ((long int)(n_temps))* ((long int)((PLAN.steps_repeat))) * ((long int)(PLAN.t_max_sim)) * ((long int)(NTOT));
    fprintf(timef, "# cpu1\n");
    fprintf(timef, "# total execution time (milliseconds):\n");
    fprintf(timef, "%f\n", total_time);
    fprintf(timef, "# total spin flips performed:\n");
    fprintf(timef, "%li\n", total_flips);
    fprintf(timef, "# average spin flips per millisecond:\n");
    fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
    fclose(timef);

    fclose(resf);
}
b1c871a8f82e4c1b0f1b1ab307dd4703cd4813d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/scatter_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" // Specializations of atomic div for complex types __device__ inline Complex<float> ScatterDivComplex(Complex<float>* address, Complex<float> val) { auto ptr_addr = reinterpret_cast<float*>(address); float addr_real = (*address).real(); float addr_imag = (*address).imag(); float temp = (pow(val.real(), static_cast<float>(2)) + pow(val.imag(), static_cast<float>(2))); MsAtomicMul(ptr_addr, val.real()); MsAtomicAdd(ptr_addr, addr_imag * val.imag()); MsAtomicMul(ptr_addr + 1, val.real()); MsAtomicSub(ptr_addr + 1, addr_real * val.imag()); return Complex<float>(MsAtomicDiv(ptr_addr, temp), MsAtomicDiv(ptr_addr + 1, temp)); } __device__ inline Complex<double> ScatterDivComplex(Complex<double>* address, Complex<double> val) { auto ptr_addr = reinterpret_cast<double*>(address); double addr_real = (*address).real(); double addr_imag = (*address).imag(); double temp = (pow(val.real(), static_cast<double>(2)) + pow(val.imag(), static_cast<double>(2))); MsAtomicMul(ptr_addr, val.real()); MsAtomicAdd(ptr_addr, addr_imag * val.imag()); MsAtomicMul(ptr_addr + 1, val.real()); MsAtomicSub(ptr_addr + 1, addr_real * val.imag()); return Complex<double>(MsAtomicDiv(ptr_addr, temp), MsAtomicDiv(ptr_addr + 1, temp)); } // Specializations of atomic mul for complex types __device__ inline Complex<float> ScatterMulComplex(Complex<float>* address, Complex<float> val) { auto ptr_addr = reinterpret_cast<float*>(address); float addr_real = (*address).real(); float addr_imag = (*address).imag(); MsAtomicMul(ptr_addr, val.real()); MsAtomicMul(ptr_addr + 1, val.real()); return Complex<float>(MsAtomicSub(ptr_addr, addr_imag * val.imag()), MsAtomicAdd(ptr_addr + 1, addr_real * val.imag())); } __device__ inline Complex<double> ScatterMulComplex(Complex<double>* address, Complex<double> val) { auto ptr_addr = reinterpret_cast<double*>(address); double addr_real = (*address).real(); double addr_imag = (*address).imag(); MsAtomicMul(ptr_addr, val.real()); MsAtomicMul(ptr_addr + 1, val.real()); return Complex<double>(MsAtomicSub(ptr_addr, addr_imag * val.imag()), MsAtomicAdd(ptr_addr + 1, addr_real * val.imag())); } template <typename T, typename S> __global__ void ScatterDivKernel(S size_limit, const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, T *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; MsAtomicDiv(&input[current_pos], updates[pos]); } 
} __global__ void ScatterDivKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } template <typename T, typename S> __global__ void ScatterMulKernel(S size_limit, const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, T *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; MsAtomicMul(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<float> *updates, Complex<float> 
*input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } template <typename T, typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const T *updates, T *input, const uint32_t &device_id, hipStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: returnhipLaunchKernelGGL(( ScatterDivKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: returnhipLaunchKernelGGL(( ScatterMulKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); default: break; } } template <typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, hipStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: returnhipLaunchKernelGGL(( ScatterDivKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: returnhipLaunchKernelGGL(( ScatterMulKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); default: break; } } template <typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const Complex<double> *updates, Complex<double> *input, const uint32_t &device_id, hipStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: returnhipLaunchKernelGGL(( ScatterDivKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), 
dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: returnhipLaunchKernelGGL(( ScatterMulKernel), dim3(CUDA_BLOCKS(device_id, updates_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size_limit, inner_size, updates_size, indices, updates, input); default: break; } } template CUDA_LIB_EXPORT void Scatter<float, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const float *updates, float *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<float, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const float *updates, float *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<half, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const half *updates, half *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<half, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const half *updates, half *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<double, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const double *updates, double *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<double, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const double *updates, double *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int8_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int8_t *updates, int8_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int8_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int8_t *updates, int8_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<unsigned char, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const unsigned char *updates, unsigned char *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<unsigned char, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const unsigned char *updates, unsigned char *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int16_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int16_t *updates, int16_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int16_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int16_t *updates, int16_t *input, const uint32_t &device_id, 
hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint16_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint16_t *updates, uint16_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint16_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint16_t *updates, uint16_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int *updates, int *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int *updates, int *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint32_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint32_t *updates, uint32_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint32_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint32_t *updates, uint32_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int64_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int64_t *updates, int64_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int64_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int64_t *updates, int64_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint64_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint64_t *updates, uint64_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint64_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint64_t *updates, uint64_t *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<float>, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<float>, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<double>, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const Complex<double> *updates, Complex<double> *input, const uint32_t &device_id, 
hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<double>, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const Complex<double> *updates, Complex<double> *input, const uint32_t &device_id, hipStream_t cuda_stream);
b1c871a8f82e4c1b0f1b1ab307dd4703cd4813d7.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/scatter_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" // Specializations of atomic div for complex types __device__ inline Complex<float> ScatterDivComplex(Complex<float>* address, Complex<float> val) { auto ptr_addr = reinterpret_cast<float*>(address); float addr_real = (*address).real(); float addr_imag = (*address).imag(); float temp = (pow(val.real(), static_cast<float>(2)) + pow(val.imag(), static_cast<float>(2))); MsAtomicMul(ptr_addr, val.real()); MsAtomicAdd(ptr_addr, addr_imag * val.imag()); MsAtomicMul(ptr_addr + 1, val.real()); MsAtomicSub(ptr_addr + 1, addr_real * val.imag()); return Complex<float>(MsAtomicDiv(ptr_addr, temp), MsAtomicDiv(ptr_addr + 1, temp)); } __device__ inline Complex<double> ScatterDivComplex(Complex<double>* address, Complex<double> val) { auto ptr_addr = reinterpret_cast<double*>(address); double addr_real = (*address).real(); double addr_imag = (*address).imag(); double temp = (pow(val.real(), static_cast<double>(2)) + pow(val.imag(), static_cast<double>(2))); MsAtomicMul(ptr_addr, val.real()); MsAtomicAdd(ptr_addr, addr_imag * val.imag()); MsAtomicMul(ptr_addr + 1, val.real()); MsAtomicSub(ptr_addr + 1, addr_real * val.imag()); return Complex<double>(MsAtomicDiv(ptr_addr, temp), MsAtomicDiv(ptr_addr + 1, temp)); } // Specializations of atomic mul for complex types __device__ inline Complex<float> ScatterMulComplex(Complex<float>* address, Complex<float> val) { auto ptr_addr = reinterpret_cast<float*>(address); float addr_real = (*address).real(); float addr_imag = (*address).imag(); MsAtomicMul(ptr_addr, val.real()); MsAtomicMul(ptr_addr + 1, val.real()); return Complex<float>(MsAtomicSub(ptr_addr, addr_imag * val.imag()), MsAtomicAdd(ptr_addr + 1, addr_real * val.imag())); } __device__ inline Complex<double> ScatterMulComplex(Complex<double>* address, Complex<double> val) { auto ptr_addr = reinterpret_cast<double*>(address); double addr_real = (*address).real(); double addr_imag = (*address).imag(); MsAtomicMul(ptr_addr, val.real()); MsAtomicMul(ptr_addr + 1, val.real()); return Complex<double>(MsAtomicSub(ptr_addr, addr_imag * val.imag()), MsAtomicAdd(ptr_addr + 1, addr_real * val.imag())); } template <typename T, typename S> __global__ void ScatterDivKernel(S size_limit, const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, T *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; MsAtomicDiv(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int size_limit, const size_t inner_size, const size_t 
updates_size, const int *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterDivKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterDivComplex(&input[current_pos], updates[pos]); } } template <typename T, typename S> __global__ void ScatterMulKernel(S size_limit, const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, T *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; MsAtomicMul(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<float> *updates, Complex<float> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += 
blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int size_limit, const size_t inner_size, const size_t updates_size, const int *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } __global__ void ScatterMulKernel(int64_t size_limit, const size_t inner_size, const size_t updates_size, const int64_t *indices, const Complex<double> *updates, Complex<double> *input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < updates_size; pos += blockDim.x * gridDim.x) { const size_t index = pos / inner_size; const size_t offset = pos % inner_size; if (indices[index] < 0 || indices[index] >= size_limit) { continue; } const size_t current_pos = indices[index] * inner_size + offset; ScatterMulComplex(&input[current_pos], updates[pos]); } } template <typename T, typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const T *updates, T *input, const uint32_t &device_id, cudaStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: return ScatterDivKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: return ScatterMulKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, inner_size, updates_size, indices, updates, input); default: break; } } template <typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, cudaStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: return ScatterDivKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: return ScatterMulKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, inner_size, updates_size, indices, updates, input); default: break; } } template <typename S> void Scatter(enum ScatterType func_type, S size_limit, const size_t &inner_size, const size_t &indices_size, const S *indices, const Complex<double> *updates, Complex<double> *input, const uint32_t &device_id, cudaStream_t cuda_stream) { const size_t updates_size = inner_size * indices_size; switch (func_type) { case SCATTER_DIV: return ScatterDivKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, inner_size, updates_size, indices, updates, input); case SCATTER_MUL: return ScatterMulKernel<<<CUDA_BLOCKS(device_id, updates_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( size_limit, 
inner_size, updates_size, indices, updates, input); default: break; } } template CUDA_LIB_EXPORT void Scatter<float, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const float *updates, float *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<float, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const float *updates, float *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<half, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const half *updates, half *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<half, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const half *updates, half *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<double, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const double *updates, double *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<double, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const double *updates, double *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int8_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int8_t *updates, int8_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int8_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int8_t *updates, int8_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<unsigned char, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const unsigned char *updates, unsigned char *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<unsigned char, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const unsigned char *updates, unsigned char *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int16_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int16_t *updates, int16_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int16_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int16_t *updates, int16_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint16_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint16_t *updates, uint16_t *input, const uint32_t &device_id, 
cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint16_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint16_t *updates, uint16_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int *updates, int *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int *updates, int *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint32_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint32_t *updates, uint32_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint32_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint32_t *updates, uint32_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int64_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const int64_t *updates, int64_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<int64_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const int64_t *updates, int64_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint64_t, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const uint64_t *updates, uint64_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<uint64_t, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const uint64_t *updates, uint64_t *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<float>, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<float>, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const Complex<float> *updates, Complex<float> *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<double>, int>(enum ScatterType func_type, int size_limit, const size_t &inner_size, const size_t &indices_size, const int *indices, const Complex<double> *updates, Complex<double> *input, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Scatter<Complex<double>, int64_t>(enum ScatterType func_type, int64_t size_limit, const size_t &inner_size, const size_t &indices_size, const int64_t *indices, const Complex<double> *updates, 
Complex<double> *input, const uint32_t &device_id, cudaStream_t cuda_stream);
521c1fc765162f126fadac51a91a1efe7976fbf0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utilities.cuh" // CUDA kernel to compute denoise image __global__ void nonLocalMeans(double* P, int m, int n, int w, double filtSigma, double* F) { double Wxy = 0.0; double Zx = 0.0; double D = 0.0; int i = blockIdx.x; int j = threadIdx.x; F[i*n+j] = 0.0; Zx = 0.0; for(int k = 0; k < m; ++k) { for(int l = 0; l < n; ++l) { Wxy = 0.0; D = 0.0; for(int p = -(w-1)/2; p <= (w-1)/2; ++p) { for(int q = -(w-1)/2; q <= (w-1)/2; ++q) { int temp = (P[i*n*w*w+j*w*w+(p+(w-1)/2)*w+(q+(w-1)/2)] - P[k*n*w*w+l*w*w+(p+(w-1)/2)*w+(q+(w-1)/2)]); D += temp*temp; } } Wxy = exp(-D/(filtSigma*filtSigma)); Zx += Wxy; // P[k][l][(w-1)/2][(w-1)/2] is the center pixel of current patch F[i*n+j] += Wxy * P[k*n*w*w + l*w*w + (w-1)/2*w + (w-1)/2]; } } F[i*n+j] /= Zx; return; } // Main function int main(int argc, char* argv[]) { // Various checks for valid input arguments if(argc < 8) { printf("Usage: ./V1 m n w input_image output_image_name\n"); return 1; } // Read input arguments int m = atoi(argv[1]); int n = atoi(argv[2]); int w = atoi(argv[3]); if(m != n) { printf("Only square images supported\n"); return 1; } if(m != 64 && m != 128 && m != 256) { printf("Only 64x64, 128x128 and 256x256 image sizes supported\n"); return 1; } if(w != 3 && w != 5 && w != 7) { printf("Only 3x3, 5x5 and 7x7 patch sizes supported\n"); return 1; } double patchSigma = 2.0; sscanf(argv[4],"%lf",&patchSigma); double filtSigma = 0.02; sscanf(argv[5],"%lf",&filtSigma); // Create gaussian kernel double* W = (double*)malloc(w*w*sizeof(double)); double sum = 0.0; for(int i = 0; i < w; ++i) { for(int j = 0; j < w; ++j) { W[i*w+j] = exp((-pow((double)(i-(w-1)/2)/(double)w, 2)-pow((double)(j-(w-1)/2)/(double)w, 2))/(2.0*patchSigma*patchSigma)); sum += W[i*w+j]; } } // Normalize for(int i = 0; i < w; ++i) for(int j = 0; j < w; ++j) W[i*w+j] /= sum; // Original Image extended to fit patches on the edges [(m+w-1)-by-(n+w-1)] double* X = (double*)malloc((m+w-1)*(n+w-1)*sizeof(double)); // 3D Patch Cube [m-by-n-by-w-by-w] double* P = (double*)malloc(m*n*w*w*sizeof(double)); // Filtered image [m-by-n] double* F = (double*)malloc(m*n*sizeof(double)); // Residual image (F - x) double* R = (double*)malloc(m*n*sizeof(double)); // 3D Patch cude pointer for GPU memory double* deviceP = NULL; hipMalloc(&deviceP, m*n*w*w*sizeof(double)); // Filtered image pointer for GPU memory double* deviceF = NULL; hipMalloc(&deviceF, m*n*sizeof(double)); FILE* fptr = fopen(argv[6], "r"); for(int i = (w-1)/2; i < m+(w-1)/2; ++i) for(int j = (w-1)/2; j < n+(w-1)/2; ++j) fscanf(fptr, "%lf,", X+i*(n+w-1)+j); // X[i*(n+w-1)+j] = (i+j); fclose(fptr); // Add noise to input image for(int i = (w-1)/2; i < m+(w-1)/2; ++i) for(int j = (w-1)/2; j < n+(w-1)/2; ++j) X[i*(n+w-1)+j] += gaussianRand(0.04); // Fill edges mirroring the inside of image // similar to padarray(inputImage, [(w-1)/2 (w-1)/2], 'symmetric') // Right and left part for(int i = (w-1)/2; i < m+(w-1)/2; ++i) { for(int j = 0; j < (w-1)/2; ++j) { X[i*(n+w-1)+j] = X[i*(n+w-1)+(w-j-2)]; } for(int j = 1; j <= (w-1)/2 ; ++j) { X[i*(n+w-1)+((n+w-1)-j)] = X[i*(n+w-1)+((n+w-1)-(w-(j-1)-1))]; } } // Upper and lower part for(int i = 0; i < m+(w-1); ++i) { for(int j = 0; j < (w-1)/2; ++j) { X[j*(n+w-1)+i] = X[(w-j-2)*(n+w-1)+i]; } for(int j = 1; j <= (w-1)/2 ; ++j) { X[((n+w-1)-j)*(n+w-1)+i] = X[((n+w-1)-(w-(j-1)-1))*(n+w-1)+i]; } } // Calculate all w-by-w patches from X multiplied // with gaussian kernel and save them to P // (i,j) is the 
center pixel of each patch // (k,l) is the patch element // appropriate offsets are used for(int i = (w-1)/2; i < m+(w-1)/2;++i) { for(int j = (w-1)/2; j < n+(w-1)/2; ++j) { for(int k = -(w-1)/2; k <= (w-1)/2; ++k) { for(int l = -(w-1)/2; l <= (w-1)/2; ++l) { P[(i-(w-1)/2)*n*w*w+(j-(w-1)/2)*w*w+(k+(w-1)/2)*w+(l+(w-1)/2)] = X[(i+k)*(n+w-1)+(j+l)]*W[(k+(w-1)/2)*w+(l+(w-1)/2)]; } } } } // Write noisy image to csv txt file // used by matlab script char outputFileName[100] = ""; sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_noisy.txt", argv[7], n, w); // printf("Writing noisy image to %s\n", outputFileName); printMatrixCsv(X, m+w-1, n+w-1, outputFileName); // Copy data for input and output // from CPU memory to GPU memory hipMemcpy(deviceP, P, m*n*w*w*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceF, F, m*n*sizeof(double), hipMemcpyHostToDevice); // CUDA events used for measuring time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Start measuring time and call kernel hipEventRecord(start); hipLaunchKernelGGL(( nonLocalMeans), dim3(m),dim3(n), 0, 0, deviceP, m, n, w, filtSigma, deviceF); hipEventRecord(stop); // Copy data for input and output // from CPU memory to GPU memory hipMemcpy(P, deviceP, m*n*w*w*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(F, deviceF, m*n*sizeof(double), hipMemcpyDeviceToHost); // Find original denoised pixel // divinding by center pixel // of gaussian kernel value for(int i = 0; i < m; ++i) for(int j = 0; j < n; ++j) F[i*n+j] /= W[(w-1)/2*w+(w-1)/2]; hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); // Calculate residual image for(int i = 0; i < m; ++i) { for(int j = 0; j < n; ++j) { R[i*n+j] = F[i*n+j] - X[(i+(w-1)/2)*(n+w-1)+(j+(w-1)/2)]; } } // Write filtered image to csv txt file // used by matlab script sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_denoised.txt", argv[7], n, w); // printf("Writing denoised image to %s\n", outputFileName); printMatrixCsv(F, m, n, outputFileName); sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_residual.txt", argv[7], n, w); // printf("Writing residual image to %s\n", outputFileName); printMatrixCsv(R, m, n, outputFileName); printf("%lf\n", milliseconds); // Deallocate CPU and GPU memory hipFree(deviceP); hipFree(deviceF); free(X); free(F); free(W); free(P); free(R); return 0; }
521c1fc765162f126fadac51a91a1efe7976fbf0.cu
#include "utilities.cuh" // CUDA kernel to compute denoise image __global__ void nonLocalMeans(double* P, int m, int n, int w, double filtSigma, double* F) { double Wxy = 0.0; double Zx = 0.0; double D = 0.0; int i = blockIdx.x; int j = threadIdx.x; F[i*n+j] = 0.0; Zx = 0.0; for(int k = 0; k < m; ++k) { for(int l = 0; l < n; ++l) { Wxy = 0.0; D = 0.0; for(int p = -(w-1)/2; p <= (w-1)/2; ++p) { for(int q = -(w-1)/2; q <= (w-1)/2; ++q) { int temp = (P[i*n*w*w+j*w*w+(p+(w-1)/2)*w+(q+(w-1)/2)] - P[k*n*w*w+l*w*w+(p+(w-1)/2)*w+(q+(w-1)/2)]); D += temp*temp; } } Wxy = exp(-D/(filtSigma*filtSigma)); Zx += Wxy; // P[k][l][(w-1)/2][(w-1)/2] is the center pixel of current patch F[i*n+j] += Wxy * P[k*n*w*w + l*w*w + (w-1)/2*w + (w-1)/2]; } } F[i*n+j] /= Zx; return; } // Main function int main(int argc, char* argv[]) { // Various checks for valid input arguments if(argc < 8) { printf("Usage: ./V1 m n w input_image output_image_name\n"); return 1; } // Read input arguments int m = atoi(argv[1]); int n = atoi(argv[2]); int w = atoi(argv[3]); if(m != n) { printf("Only square images supported\n"); return 1; } if(m != 64 && m != 128 && m != 256) { printf("Only 64x64, 128x128 and 256x256 image sizes supported\n"); return 1; } if(w != 3 && w != 5 && w != 7) { printf("Only 3x3, 5x5 and 7x7 patch sizes supported\n"); return 1; } double patchSigma = 2.0; sscanf(argv[4],"%lf",&patchSigma); double filtSigma = 0.02; sscanf(argv[5],"%lf",&filtSigma); // Create gaussian kernel double* W = (double*)malloc(w*w*sizeof(double)); double sum = 0.0; for(int i = 0; i < w; ++i) { for(int j = 0; j < w; ++j) { W[i*w+j] = exp((-pow((double)(i-(w-1)/2)/(double)w, 2)-pow((double)(j-(w-1)/2)/(double)w, 2))/(2.0*patchSigma*patchSigma)); sum += W[i*w+j]; } } // Normalize for(int i = 0; i < w; ++i) for(int j = 0; j < w; ++j) W[i*w+j] /= sum; // Original Image extended to fit patches on the edges [(m+w-1)-by-(n+w-1)] double* X = (double*)malloc((m+w-1)*(n+w-1)*sizeof(double)); // 3D Patch Cube [m-by-n-by-w-by-w] double* P = (double*)malloc(m*n*w*w*sizeof(double)); // Filtered image [m-by-n] double* F = (double*)malloc(m*n*sizeof(double)); // Residual image (F - x) double* R = (double*)malloc(m*n*sizeof(double)); // 3D Patch cude pointer for GPU memory double* deviceP = NULL; cudaMalloc(&deviceP, m*n*w*w*sizeof(double)); // Filtered image pointer for GPU memory double* deviceF = NULL; cudaMalloc(&deviceF, m*n*sizeof(double)); FILE* fptr = fopen(argv[6], "r"); for(int i = (w-1)/2; i < m+(w-1)/2; ++i) for(int j = (w-1)/2; j < n+(w-1)/2; ++j) fscanf(fptr, "%lf,", X+i*(n+w-1)+j); // X[i*(n+w-1)+j] = (i+j); fclose(fptr); // Add noise to input image for(int i = (w-1)/2; i < m+(w-1)/2; ++i) for(int j = (w-1)/2; j < n+(w-1)/2; ++j) X[i*(n+w-1)+j] += gaussianRand(0.04); // Fill edges mirroring the inside of image // similar to padarray(inputImage, [(w-1)/2 (w-1)/2], 'symmetric') // Right and left part for(int i = (w-1)/2; i < m+(w-1)/2; ++i) { for(int j = 0; j < (w-1)/2; ++j) { X[i*(n+w-1)+j] = X[i*(n+w-1)+(w-j-2)]; } for(int j = 1; j <= (w-1)/2 ; ++j) { X[i*(n+w-1)+((n+w-1)-j)] = X[i*(n+w-1)+((n+w-1)-(w-(j-1)-1))]; } } // Upper and lower part for(int i = 0; i < m+(w-1); ++i) { for(int j = 0; j < (w-1)/2; ++j) { X[j*(n+w-1)+i] = X[(w-j-2)*(n+w-1)+i]; } for(int j = 1; j <= (w-1)/2 ; ++j) { X[((n+w-1)-j)*(n+w-1)+i] = X[((n+w-1)-(w-(j-1)-1))*(n+w-1)+i]; } } // Calculate all w-by-w patches from X multiplied // with gaussian kernel and save them to P // (i,j) is the center pixel of each patch // (k,l) is the patch element // appropriate offsets are 
used for(int i = (w-1)/2; i < m+(w-1)/2;++i) { for(int j = (w-1)/2; j < n+(w-1)/2; ++j) { for(int k = -(w-1)/2; k <= (w-1)/2; ++k) { for(int l = -(w-1)/2; l <= (w-1)/2; ++l) { P[(i-(w-1)/2)*n*w*w+(j-(w-1)/2)*w*w+(k+(w-1)/2)*w+(l+(w-1)/2)] = X[(i+k)*(n+w-1)+(j+l)]*W[(k+(w-1)/2)*w+(l+(w-1)/2)]; } } } } // Write noisy image to csv txt file // used by matlab script char outputFileName[100] = ""; sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_noisy.txt", argv[7], n, w); // printf("Writing noisy image to %s\n", outputFileName); printMatrixCsv(X, m+w-1, n+w-1, outputFileName); // Copy data for input and output // from CPU memory to GPU memory cudaMemcpy(deviceP, P, m*n*w*w*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceF, F, m*n*sizeof(double), cudaMemcpyHostToDevice); // CUDA events used for measuring time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Start measuring time and call kernel cudaEventRecord(start); nonLocalMeans<<<m,n>>>(deviceP, m, n, w, filtSigma, deviceF); cudaEventRecord(stop); // Copy data for input and output // from CPU memory to GPU memory cudaMemcpy(P, deviceP, m*n*w*w*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(F, deviceF, m*n*sizeof(double), cudaMemcpyDeviceToHost); // Find original denoised pixel // divinding by center pixel // of gaussian kernel value for(int i = 0; i < m; ++i) for(int j = 0; j < n; ++j) F[i*n+j] /= W[(w-1)/2*w+(w-1)/2]; cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); // Calculate residual image for(int i = 0; i < m; ++i) { for(int j = 0; j < n; ++j) { R[i*n+j] = F[i*n+j] - X[(i+(w-1)/2)*(n+w-1)+(j+(w-1)/2)]; } } // Write filtered image to csv txt file // used by matlab script sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_denoised.txt", argv[7], n, w); // printf("Writing denoised image to %s\n", outputFileName); printMatrixCsv(F, m, n, outputFileName); sprintf(outputFileName, "../output_images/output_images_csv_txt/output_images_V1/%s_%d_%d_residual.txt", argv[7], n, w); // printf("Writing residual image to %s\n", outputFileName); printMatrixCsv(R, m, n, outputFileName); printf("%lf\n", milliseconds); // Deallocate CPU and GPU memory cudaFree(deviceP); cudaFree(deviceF); free(X); free(F); free(W); free(P); free(R); return 0; }
74bdafc7055713a332d47f7029ece7f1a2028b4b.hip
// !!! This is a file automatically generated by hipify!!!
/* Some common routines for allocating Blas vectors,
 * filling them with some data and printing them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <rocblas.h>

void checkCublasStatus(hipblasStatus_t status, const char *error);
void checkCudaError(const char *errormsg);

float *initManagedVector(int n)
{
  float *ptr;

  hipMallocManaged(&ptr, n * sizeof(float)); // rows
  if (ptr == NULL)
  {
    fprintf(stderr,"Malloc for unified memory vector on host failed !\n");
    exit(1);
  }
  return ptr;
}

void freeManagedVector(float *ptr)
{
  hipFree(ptr);
}

void fillVector(float *a, int n, int offset)
{
  int x;

  for(x=0; x<n; x++)
    a[x] = (float) x + offset;
}

void showVector(const char *name, float *a, int n)
{
  int x;

# if (DEBUG > 0)
  for(x=0; x<n; x++)
# else
  x = n - 1;
# endif
  {
    printf("%s[%02u]=%6.2f ",name,x,a[x]);
    printf("\n");
  }
}
74bdafc7055713a332d47f7029ece7f1a2028b4b.cu
/* Some common routines for allocating Blas vectors,
 * filling them with some data and printing them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

/* Includes, cuda */
#include <cuda.h>
#include <cublas_v2.h>

void checkCublasStatus(cublasStatus_t status, const char *error);
void checkCudaError(const char *errormsg);

float *initManagedVector(int n)
{
  float *ptr;

  cudaMallocManaged(&ptr, n * sizeof(float)); // rows
  if (ptr == NULL)
  {
    fprintf(stderr,"Malloc for unified memory vector on host failed !\n");
    exit(1);
  }
  return ptr;
}

void freeManagedVector(float *ptr)
{
  cudaFree(ptr);
}

void fillVector(float *a, int n, int offset)
{
  int x;

  for(x=0; x<n; x++)
    a[x] = (float) x + offset;
}

void showVector(const char *name, float *a, int n)
{
  int x;

# if (DEBUG > 0)
  for(x=0; x<n; x++)
# else
  x = n - 1;
# endif
  {
    printf("%s[%02u]=%6.2f ",name,x,a[x]);
    printf("\n");
  }
}
9adc4301e72b1b0de96da5c6da76bd6503e3d872.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <cfloat> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_level = bottom_rois[0]; int roi_start_w = bottom_rois[1]; int roi_start_h = bottom_rois[2]; int roi_end_w = bottom_rois[3]; int roi_end_h = bottom_rois[4]; // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_level * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_level = offset_bottom_rois[0]; // Skip if ROI's level doesn't match n if (n != roi_level) { continue; } int roi_start_w = offset_bottom_rois[1]; int roi_start_h = offset_bottom_rois[2]; int roi_end_w = offset_bottom_rois[3]; int roi_end_h = offset_bottom_rois[4]; // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void 
ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
9adc4301e72b1b0de96da5c6da76bd6503e3d872.cu
// -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <cfloat> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_level = bottom_rois[0]; int roi_start_w = bottom_rois[1]; int roi_start_h = bottom_rois[2]; int roi_end_w = bottom_rois[3]; int roi_end_h = bottom_rois[4]; // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_level * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_level = offset_bottom_rois[0]; // Skip if ROI's level doesn't match n if (n != roi_level) { continue; } int roi_start_w = offset_bottom_rois[1]; int roi_start_h = offset_bottom_rois[2]; int roi_end_w = offset_bottom_rois[3]; int roi_end_h = offset_bottom_rois[4]; // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Backward_gpu(const 
vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
63b140a14bea6cb98a63cc07a62154395f4d1196.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory Iowa State University and The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: joaander #include "Enforce2DUpdaterGPU_hip.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif #include <stdio.h> /*! \file Enforce2DUpdaterGPU.cu \brief Defines GPU kernel code for constraining systems to a 2D plane on the GPU. Used by Enforce2DUpdaterGPU. */ //! Constrains partcles to the xy plane on the GPU /*! \param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ extern "C" __global__ void gpu_enforce2d_kernel(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; Scalar3 accel = d_accel[idx]; // zero the z-velocity and z-acceleration(FLOPS: ?) 
vel.z = 0.0f; accel.z = 0.0f; // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; d_accel[idx] = accel; } } /*! \param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ hipError_t gpu_enforce2d(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (N/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_enforce2d_kernel), dim3(grid), dim3(threads) , 0, 0, N, d_vel, d_accel); return hipSuccess; }
63b140a14bea6cb98a63cc07a62154395f4d1196.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory Iowa State University and The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: joaander #include "Enforce2DUpdaterGPU.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif #include <stdio.h> /*! \file Enforce2DUpdaterGPU.cu \brief Defines GPU kernel code for constraining systems to a 2D plane on the GPU. Used by Enforce2DUpdaterGPU. */ //! Constrains partcles to the xy plane on the GPU /*! \param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ extern "C" __global__ void gpu_enforce2d_kernel(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; Scalar3 accel = d_accel[idx]; // zero the z-velocity and z-acceleration(FLOPS: ?) vel.z = 0.0f; accel.z = 0.0f; // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; d_accel[idx] = accel; } } /*! 
\param N number of particles in system \param d_vel Particle velocities to constrain to xy plane \param d_accel Particle accelerations to constrain to xy plane */ cudaError_t gpu_enforce2d(const unsigned int N, Scalar4 *d_vel, Scalar3 *d_accel) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (N/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_enforce2d_kernel<<< grid, threads >>>(N, d_vel, d_accel); return cudaSuccess; }
430e067422fe958b2f3bd49b6d647f6b860881f2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compare.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_ip_v = NULL; hipMalloc(&d_ip_v, XSIZE*YSIZE); float *d_ip_ir = NULL; hipMalloc(&d_ip_ir, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compare), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip_v,d_ip_ir,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compare), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip_v,d_ip_ir,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( compare), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip_v,d_ip_ir,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
430e067422fe958b2f3bd49b6d647f6b860881f2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compare.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_ip_v = NULL; cudaMalloc(&d_ip_v, XSIZE*YSIZE); float *d_ip_ir = NULL; cudaMalloc(&d_ip_ir, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compare<<<gridBlock,threadBlock>>>(d_ip_v,d_ip_ir,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compare<<<gridBlock,threadBlock>>>(d_ip_v,d_ip_ir,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compare<<<gridBlock,threadBlock>>>(d_ip_v,d_ip_ir,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
506451b795a32f8e1f74690dab638a74aaac640d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "implantCoeffs.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrices = NULL; hipMalloc(&matrices, XSIZE*YSIZE); float *coeffArray = NULL; hipMalloc(&coeffArray, XSIZE*YSIZE); int savedCoeffs = 1; int dimsize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( implantCoeffs), dim3(gridBlock),dim3(threadBlock), 0, 0, matrices,coeffArray,savedCoeffs,dimsize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( implantCoeffs), dim3(gridBlock),dim3(threadBlock), 0, 0, matrices,coeffArray,savedCoeffs,dimsize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( implantCoeffs), dim3(gridBlock),dim3(threadBlock), 0, 0, matrices,coeffArray,savedCoeffs,dimsize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
506451b795a32f8e1f74690dab638a74aaac640d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "implantCoeffs.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrices = NULL; cudaMalloc(&matrices, XSIZE*YSIZE); float *coeffArray = NULL; cudaMalloc(&coeffArray, XSIZE*YSIZE); int savedCoeffs = 1; int dimsize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); implantCoeffs<<<gridBlock,threadBlock>>>(matrices,coeffArray,savedCoeffs,dimsize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { implantCoeffs<<<gridBlock,threadBlock>>>(matrices,coeffArray,savedCoeffs,dimsize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { implantCoeffs<<<gridBlock,threadBlock>>>(matrices,coeffArray,savedCoeffs,dimsize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5626b000c58be14c9e58525f8e0190a2cbcc9369.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <CImg.h> #include <iostream> #include <thrust/complex.h> using namespace cimg_library; using u = std::uint32_t; using i = std::int32_t; constexpr u max_iter = 2000; constexpr u window_x = 1920*4; constexpr u window_y = 1080*4; __global__ void generate_set(u win_x, u win_y, u* result, u max_iter); __device__ double map(double x, double in_min, double in_max, double out_min, double out_max); __device__ u generate_color(std::uint8_t r, std::uint8_t g, std::uint8_t b); __device__ u iter_to_color(u iterations, u max_iter); int main() { CImg<unsigned char> theImage(window_x, window_y, 1, 3, 0); u* res_arr; hipMallocManaged(&res_arr, sizeof(u)*window_x*window_y*2 /* FIXME: to avoid segfaults I added a *2 to the allocated memory */); hipLaunchKernelGGL(( generate_set), dim3(24),dim3(512), 0, 0, window_x, window_y, res_arr, max_iter); hipDeviceSynchronize(); for (u x = 0; x < window_x; ++x) { for (u y = 0; y < window_y; ++y) { u color = res_arr[x*window_x+y]; theImage(x, y, 0) = color & 0xff; theImage(x, y, 1) = (color>>8) & 0xff; theImage(x, y, 2) = (color>>16) & 0xff; } } hipFree(res_arr); theImage.save("img.png"); return 0; } __global__ void generate_set(u win_x, u win_y, u* result, u max_iter) { const thrust::complex<double> julia_c(-0.1f, 0.65f); u iter = 0; u x_index = blockIdx.x; u x_stride = gridDim.x; u y_index = threadIdx.x; u y_stride = blockDim.x; for (u x = x_index; x < win_x; x += x_stride) { for(u y = y_index; y < win_y; y += y_stride) { thrust::complex<double> a ( map(x, 0.0f, win_x, -2.5f, 1.5f), map(y, 0.0f, win_y, -1.25f, 1.25f) ); thrust::complex<double> z = 0; for (iter = 0; abs(z) <= 2 && iter < max_iter; ++iter) { z = z*z + a; } result[x*window_x+y] = iter_to_color(iter, max_iter); } } } __device__ u generate_color(std::uint8_t r, std::uint8_t g, std::uint8_t b) { return (b<<16) + (g<<8) + r; } __device__ u iter_to_color(u iterations, u max_iter) { switch (iterations*7/max_iter) { case 0: return generate_color(255, 0, 0); // red break; case 1: case 2: return generate_color(255, 128, 0); // orange break; case 3: return generate_color(0, 255, 0); // green break; case 4: return generate_color(0, 255, 128); // blue-greenish break; case 5: return generate_color(0, 255, 255); // light blue break; case 6: return generate_color(0, 0, 255); // dark blue break; case 7: return generate_color(0, 0, 0); break; } return 0; } __device__ double map(double x, double in_min, double in_max, double out_min, double out_max) { return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min; }
5626b000c58be14c9e58525f8e0190a2cbcc9369.cu
#include <CImg.h> #include <iostream> #include <thrust/complex.h> using namespace cimg_library; using u = std::uint32_t; using i = std::int32_t; constexpr u max_iter = 2000; constexpr u window_x = 1920*4; constexpr u window_y = 1080*4; __global__ void generate_set(u win_x, u win_y, u* result, u max_iter); __device__ double map(double x, double in_min, double in_max, double out_min, double out_max); __device__ u generate_color(std::uint8_t r, std::uint8_t g, std::uint8_t b); __device__ u iter_to_color(u iterations, u max_iter); int main() { CImg<unsigned char> theImage(window_x, window_y, 1, 3, 0); u* res_arr; cudaMallocManaged(&res_arr, sizeof(u)*window_x*window_y*2 /* FIXME: to avoid segfaults I added a *2 to the allocated memory */); generate_set<<<24,512>>>(window_x, window_y, res_arr, max_iter); cudaDeviceSynchronize(); for (u x = 0; x < window_x; ++x) { for (u y = 0; y < window_y; ++y) { u color = res_arr[x*window_x+y]; theImage(x, y, 0) = color & 0xff; theImage(x, y, 1) = (color>>8) & 0xff; theImage(x, y, 2) = (color>>16) & 0xff; } } cudaFree(res_arr); theImage.save("img.png"); return 0; } __global__ void generate_set(u win_x, u win_y, u* result, u max_iter) { const thrust::complex<double> julia_c(-0.1f, 0.65f); u iter = 0; u x_index = blockIdx.x; u x_stride = gridDim.x; u y_index = threadIdx.x; u y_stride = blockDim.x; for (u x = x_index; x < win_x; x += x_stride) { for(u y = y_index; y < win_y; y += y_stride) { thrust::complex<double> a ( map(x, 0.0f, win_x, -2.5f, 1.5f), map(y, 0.0f, win_y, -1.25f, 1.25f) ); thrust::complex<double> z = 0; for (iter = 0; abs(z) <= 2 && iter < max_iter; ++iter) { z = z*z + a; } result[x*window_x+y] = iter_to_color(iter, max_iter); } } } __device__ u generate_color(std::uint8_t r, std::uint8_t g, std::uint8_t b) { return (b<<16) + (g<<8) + r; } __device__ u iter_to_color(u iterations, u max_iter) { switch (iterations*7/max_iter) { case 0: return generate_color(255, 0, 0); // red break; case 1: case 2: return generate_color(255, 128, 0); // orange break; case 3: return generate_color(0, 255, 0); // green break; case 4: return generate_color(0, 255, 128); // blue-greenish break; case 5: return generate_color(0, 255, 255); // light blue break; case 6: return generate_color(0, 0, 255); // dark blue break; case 7: return generate_color(0, 0, 0); break; } return 0; } __device__ double map(double x, double in_min, double in_max, double out_min, double out_max) { return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min; }
SC.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file is part of the LITIV framework; visit the original repository at // https://github.com/plstcharles/litiv for more information. // // Copyright 2017 Pierre-Luc St-Charles; pierre-luc.st-charles<at>polymtl.ca // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "SC.cuh" namespace impl { // note: as of 2017/08 with cuda 8.0 and msvc2015, nvcc fails to compile the kernel below // with the proper values for 'bGenDescMap' via template parameter on release builds // // ...no matter what template parameter value is given in the device call, the value in // the kernel always evaluates to 'false' (wtf) // // ...current workaround with a regular parameter might be slightly slower __global__ void scdesc_fill_desc_direct(const cv::cuda::PtrStep<cv::Point2f> oKeyPts, const cv::cuda::PtrStepSz<cv::Point2f> oContourPts, const cv::cuda::PtrStep<uchar> oDistMask, const hipTextureObject_t pDescLUMask_tex, int nMaskSize, cv::cuda::PtrStepSzf oDescs, bool bGenDescMap, bool bNonZeroInitBins, bool bNormalizeBins) { assert((oContourPts.cols==0 && oContourPts.rows==0) || oContourPts.cols==1); assert((nMaskSize%2)==1); assert((blockDim.x%warpSize)==0 && blockDim.y==1 && blockDim.z==1); const int nDescSize = oDescs.cols; assert(nDescSize>0); const int nStepPerDesc = __float2int_ru(float(nDescSize)/blockDim.x); assert(nStepPerDesc>=1); const int nLUTSize = nStepPerDesc*blockDim.x; assert(blockDim.x<=nLUTSize); extern __shared__ volatile float aTmpCommon[]; volatile float* aTmpDesc = aTmpCommon; int2 vKeyPt_i; float2 vKeyPt_f; float* aOutputDesc; if(bGenDescMap) { vKeyPt_i = make_int2(blockIdx.x,blockIdx.y); vKeyPt_f = make_float2((float)blockIdx.x,(float)blockIdx.y); aOutputDesc = oDescs.ptr(blockIdx.y*gridDim.x+blockIdx.x); } else { const cv::Point2f& oKeyPt = oKeyPts(blockIdx.x,0); vKeyPt_i = make_int2(__float2int_rn(oKeyPt.x),__float2int_rn(oKeyPt.y)); vKeyPt_f = make_float2(oKeyPt.x,oKeyPt.y); aOutputDesc = oDescs.ptr(blockIdx.x); } const float fInitVal = bNonZeroInitBins?max(10.0f/nDescSize,0.5f):0.0f; for(int nStep=0; nStep<nStepPerDesc; ++nStep) { const int nDescIdx = blockDim.x*nStep + threadIdx.x; aTmpDesc[nDescIdx] = (nDescIdx<nDescSize)?fInitVal:0.0f; } __syncthreads(); if(oDistMask(vKeyPt_i.y,vKeyPt_i.x)) { const int nContourPts = oContourPts.rows; const int nHalfMaskSize = nMaskSize/2; int nContourPtIdx = threadIdx.x; while(nContourPtIdx<nContourPts) { const cv::Point2f& oContourPt = oContourPts(nContourPtIdx,0); const int nLookupRow = __float2int_rn(oContourPt.y-vKeyPt_f.y)+nHalfMaskSize; const int nLookupCol = __float2int_rn(oContourPt.x-vKeyPt_f.x)+nHalfMaskSize; if(nLookupRow>=0 && nLookupRow<nMaskSize && nLookupCol>=0 && nLookupCol<nMaskSize) { const int nDescBinIdx = tex2D<int>(pDescLUMask_tex,nLookupCol,nLookupRow); if(nDescBinIdx>=0) atomicAdd((float*)aTmpDesc+nDescBinIdx,1.0f); } nContourPtIdx += blockDim.x; } __syncthreads(); } if(bNormalizeBins) { float fSum; if(nLUTSize==32 && blockDim.x==32) { 
assert(warpSize==32); float fVal = aTmpDesc[threadIdx.x]*aTmpDesc[threadIdx.x]; #if __CUDACC_VER_MAJOR__>=9 fVal += __shfl_down_sync(0xFFFFFFFF,fVal,16); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,8); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,4); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,2); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,1); fSum = __shfl_sync(0xFFFFFFFF,fVal,0); #else //__CUDACC_VER_MAJOR__<9 fVal += __shfl_down(fVal,16); fVal += __shfl_down(fVal,8); fVal += __shfl_down(fVal,4); fVal += __shfl_down(fVal,2); fVal += __shfl_down(fVal,1); fSum = __shfl(fVal,0); #endif //__CUDACC_VER_MAJOR__<9 } else { volatile float* aTmpLUT = aTmpCommon+nLUTSize; for(int nStep=0; nStep<nStepPerDesc; ++nStep) { const int nDescIdx = blockDim.x*nStep + threadIdx.x; aTmpLUT[nDescIdx] = aTmpDesc[nDescIdx]*aTmpDesc[nDescIdx]; } if(blockDim.x==32) { assert(warpSize==32 && nLUTSize>32); for(int nStep=nLUTSize-32; nStep>32; nStep-=32) aTmpLUT[threadIdx.x + (nStep-32)] += aTmpLUT[threadIdx.x + nStep]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 32]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 16]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 8]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 4]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 2]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 1]; } else { assert(lv::isPow2(blockDim.x)); if(nLUTSize>blockDim.x) { assert(nLUTSize>=blockDim.x*2); for(int nStep=nLUTSize-blockDim.x; nStep>=blockDim.x; nStep-=blockDim.x) aTmpLUT[threadIdx.x + (nStep-blockDim.x)] += aTmpLUT[threadIdx.x + nStep]; for(int nStep=blockDim.x/2; nStep>0; nStep>>=1) aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + nStep]; } else { assert(nLUTSize==blockDim.x); for(int nStep=blockDim.x/2; nStep>0; nStep>>=1) { if(threadIdx.x<nStep) aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x+nStep]; __syncthreads(); } } } fSum = aTmpLUT[0]; } const float fInvNorm = rsqrt(fSum); int nDescIdx = threadIdx.x; while(nDescIdx<nDescSize) { aOutputDesc[nDescIdx] = aTmpDesc[nDescIdx]*fInvNorm; nDescIdx += blockDim.x; } } else { int nDescIdx = threadIdx.x; while(nDescIdx<nDescSize) { aOutputDesc[nDescIdx] = aTmpDesc[nDescIdx]; nDescIdx += blockDim.x; } } } } // namespace impl ///////////////////////////////////////////////////////////////////////// void device::scdesc_fill_desc_direct(const lv::cuda::KernelParams& oKParams, const cv::cuda::PtrStep<cv::Point2f> oKeyPts, const cv::cuda::PtrStepSz<cv::Point2f> oContourPts, const cv::cuda::PtrStep<uchar> oDistMask, const hipTextureObject_t pDescLUMask_tex, int nMaskSize, cv::cuda::PtrStepSzf oDescs, bool bNonZeroInitBins, bool bGenDescMap, bool bNormalizeBins) { cudaKernelWrap(scdesc_fill_desc_direct,oKParams,oKeyPts,oContourPts,oDistMask,pDescLUMask_tex,nMaskSize,oDescs,bGenDescMap,bNonZeroInitBins,bNormalizeBins); }
SC.cu
// This file is part of the LITIV framework; visit the original repository at // https://github.com/plstcharles/litiv for more information. // // Copyright 2017 Pierre-Luc St-Charles; pierre-luc.st-charles<at>polymtl.ca // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "SC.cuh" namespace impl { // note: as of 2017/08 with cuda 8.0 and msvc2015, nvcc fails to compile the kernel below // with the proper values for 'bGenDescMap' via template parameter on release builds // // ...no matter what template parameter value is given in the device call, the value in // the kernel always evaluates to 'false' (wtf) // // ...current workaround with a regular parameter might be slightly slower __global__ void scdesc_fill_desc_direct(const cv::cuda::PtrStep<cv::Point2f> oKeyPts, const cv::cuda::PtrStepSz<cv::Point2f> oContourPts, const cv::cuda::PtrStep<uchar> oDistMask, const cudaTextureObject_t pDescLUMask_tex, int nMaskSize, cv::cuda::PtrStepSzf oDescs, bool bGenDescMap, bool bNonZeroInitBins, bool bNormalizeBins) { assert((oContourPts.cols==0 && oContourPts.rows==0) || oContourPts.cols==1); assert((nMaskSize%2)==1); assert((blockDim.x%warpSize)==0 && blockDim.y==1 && blockDim.z==1); const int nDescSize = oDescs.cols; assert(nDescSize>0); const int nStepPerDesc = __float2int_ru(float(nDescSize)/blockDim.x); assert(nStepPerDesc>=1); const int nLUTSize = nStepPerDesc*blockDim.x; assert(blockDim.x<=nLUTSize); extern __shared__ volatile float aTmpCommon[]; volatile float* aTmpDesc = aTmpCommon; int2 vKeyPt_i; float2 vKeyPt_f; float* aOutputDesc; if(bGenDescMap) { vKeyPt_i = make_int2(blockIdx.x,blockIdx.y); vKeyPt_f = make_float2((float)blockIdx.x,(float)blockIdx.y); aOutputDesc = oDescs.ptr(blockIdx.y*gridDim.x+blockIdx.x); } else { const cv::Point2f& oKeyPt = oKeyPts(blockIdx.x,0); vKeyPt_i = make_int2(__float2int_rn(oKeyPt.x),__float2int_rn(oKeyPt.y)); vKeyPt_f = make_float2(oKeyPt.x,oKeyPt.y); aOutputDesc = oDescs.ptr(blockIdx.x); } const float fInitVal = bNonZeroInitBins?max(10.0f/nDescSize,0.5f):0.0f; for(int nStep=0; nStep<nStepPerDesc; ++nStep) { const int nDescIdx = blockDim.x*nStep + threadIdx.x; aTmpDesc[nDescIdx] = (nDescIdx<nDescSize)?fInitVal:0.0f; } __syncthreads(); if(oDistMask(vKeyPt_i.y,vKeyPt_i.x)) { const int nContourPts = oContourPts.rows; const int nHalfMaskSize = nMaskSize/2; int nContourPtIdx = threadIdx.x; while(nContourPtIdx<nContourPts) { const cv::Point2f& oContourPt = oContourPts(nContourPtIdx,0); const int nLookupRow = __float2int_rn(oContourPt.y-vKeyPt_f.y)+nHalfMaskSize; const int nLookupCol = __float2int_rn(oContourPt.x-vKeyPt_f.x)+nHalfMaskSize; if(nLookupRow>=0 && nLookupRow<nMaskSize && nLookupCol>=0 && nLookupCol<nMaskSize) { const int nDescBinIdx = tex2D<int>(pDescLUMask_tex,nLookupCol,nLookupRow); if(nDescBinIdx>=0) atomicAdd((float*)aTmpDesc+nDescBinIdx,1.0f); } nContourPtIdx += blockDim.x; } __syncthreads(); } if(bNormalizeBins) { float fSum; if(nLUTSize==32 && blockDim.x==32) { assert(warpSize==32); float fVal = aTmpDesc[threadIdx.x]*aTmpDesc[threadIdx.x]; #if 
__CUDACC_VER_MAJOR__>=9 fVal += __shfl_down_sync(0xFFFFFFFF,fVal,16); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,8); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,4); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,2); fVal += __shfl_down_sync(0xFFFFFFFF,fVal,1); fSum = __shfl_sync(0xFFFFFFFF,fVal,0); #else //__CUDACC_VER_MAJOR__<9 fVal += __shfl_down(fVal,16); fVal += __shfl_down(fVal,8); fVal += __shfl_down(fVal,4); fVal += __shfl_down(fVal,2); fVal += __shfl_down(fVal,1); fSum = __shfl(fVal,0); #endif //__CUDACC_VER_MAJOR__<9 } else { volatile float* aTmpLUT = aTmpCommon+nLUTSize; for(int nStep=0; nStep<nStepPerDesc; ++nStep) { const int nDescIdx = blockDim.x*nStep + threadIdx.x; aTmpLUT[nDescIdx] = aTmpDesc[nDescIdx]*aTmpDesc[nDescIdx]; } if(blockDim.x==32) { assert(warpSize==32 && nLUTSize>32); for(int nStep=nLUTSize-32; nStep>32; nStep-=32) aTmpLUT[threadIdx.x + (nStep-32)] += aTmpLUT[threadIdx.x + nStep]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 32]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 16]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 8]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 4]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 2]; aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + 1]; } else { assert(lv::isPow2(blockDim.x)); if(nLUTSize>blockDim.x) { assert(nLUTSize>=blockDim.x*2); for(int nStep=nLUTSize-blockDim.x; nStep>=blockDim.x; nStep-=blockDim.x) aTmpLUT[threadIdx.x + (nStep-blockDim.x)] += aTmpLUT[threadIdx.x + nStep]; for(int nStep=blockDim.x/2; nStep>0; nStep>>=1) aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x + nStep]; } else { assert(nLUTSize==blockDim.x); for(int nStep=blockDim.x/2; nStep>0; nStep>>=1) { if(threadIdx.x<nStep) aTmpLUT[threadIdx.x] += aTmpLUT[threadIdx.x+nStep]; __syncthreads(); } } } fSum = aTmpLUT[0]; } const float fInvNorm = rsqrt(fSum); int nDescIdx = threadIdx.x; while(nDescIdx<nDescSize) { aOutputDesc[nDescIdx] = aTmpDesc[nDescIdx]*fInvNorm; nDescIdx += blockDim.x; } } else { int nDescIdx = threadIdx.x; while(nDescIdx<nDescSize) { aOutputDesc[nDescIdx] = aTmpDesc[nDescIdx]; nDescIdx += blockDim.x; } } } } // namespace impl ///////////////////////////////////////////////////////////////////////// void device::scdesc_fill_desc_direct(const lv::cuda::KernelParams& oKParams, const cv::cuda::PtrStep<cv::Point2f> oKeyPts, const cv::cuda::PtrStepSz<cv::Point2f> oContourPts, const cv::cuda::PtrStep<uchar> oDistMask, const cudaTextureObject_t pDescLUMask_tex, int nMaskSize, cv::cuda::PtrStepSzf oDescs, bool bNonZeroInitBins, bool bGenDescMap, bool bNormalizeBins) { cudaKernelWrap(scdesc_fill_desc_direct,oKParams,oKeyPts,oContourPts,oDistMask,pDescLUMask_tex,nMaskSize,oDescs,bGenDescMap,bNonZeroInitBins,bNormalizeBins); }
1f2feee49565d2ded50ffa9fa6d6371987be0310.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; }
1f2feee49565d2ded50ffa9fa6d6371987be0310.cu
#include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; }
588d8c5abcb15768d323b8eded243e7dc0eddb17.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" void printDeviceProperties() { struct hipDeviceProp_t deviceProp; int ret = hipGetDeviceProperties(&deviceProp, 0); CPE(ret != hipSuccess, "Get Device Properties failed\n"); printf("\n=================DEVICE PROPERTIES=================\n"); printf("\tDevice name: %s\n", deviceProp.name); printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem); printf("\tWarp size: %d\n", deviceProp.warpSize); printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor); printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount); printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf("\n"); } // Like printf, but red. Limited to 1000 characters. void red_printf(const char *format, ...) { #define RED_LIM 1000 va_list args; int i; char buf1[RED_LIM], buf2[RED_LIM]; memset(buf1, 0, RED_LIM); memset(buf2, 0, RED_LIM); va_start(args, format); // Marshal the stuff to print in a buffer vsnprintf(buf1, RED_LIM, format, args); // Probably a bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf1[i] == 0); } // Add markers for red color and reset color snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1); // Probably another bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf2[i] == 0); } printf("%s", buf2); va_end(args); }
588d8c5abcb15768d323b8eded243e7dc0eddb17.cu
#include "common.h" void printDeviceProperties() { struct cudaDeviceProp deviceProp; int ret = cudaGetDeviceProperties(&deviceProp, 0); CPE(ret != cudaSuccess, "Get Device Properties failed\n"); printf("\n=================DEVICE PROPERTIES=================\n"); printf("\tDevice name: %s\n", deviceProp.name); printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem); printf("\tWarp size: %d\n", deviceProp.warpSize); printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor); printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount); printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf("\n"); } // Like printf, but red. Limited to 1000 characters. void red_printf(const char *format, ...) { #define RED_LIM 1000 va_list args; int i; char buf1[RED_LIM], buf2[RED_LIM]; memset(buf1, 0, RED_LIM); memset(buf2, 0, RED_LIM); va_start(args, format); // Marshal the stuff to print in a buffer vsnprintf(buf1, RED_LIM, format, args); // Probably a bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf1[i] == 0); } // Add markers for red color and reset color snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1); // Probably another bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf2[i] == 0); } printf("%s", buf2); va_end(args); }
056384ea17bff40029a4a6ffe8965c41732a3627.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } #define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1) #define ROTATE_UP(val,MAX) ((val+1)%MAX) /** * GPU Device kernel for the for 2D stencil * First attempt during hackaton * M = Rows, N = Cols INCLUDING HALOS * In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows */ __global__ void gpu_stencil2D_4pt_hack5_cp_cols(double * dst, double * shared_cols, double *shared_rows,int tile_x,int tile_y, int M, int N){ #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){ printf("copy cols begin!\n"); } #endif int base_global_row = tile_y * blockIdx.y; int base_global_col = tile_x * blockIdx.x; int base_global_idx = N*base_global_row + base_global_col ; int nextCol = base_global_col+1; bool legalNextCol = (nextCol<N); int t = threadIdx.y; int idx = 2*M*blockIdx.x + t + base_global_row; int idx_nextCol = idx + M ; bool legalCurRow = (base_global_row + t)<M; if(legalCurRow){ shared_cols[idx] = dst[base_global_idx + t*N]; } if(legalNextCol && legalCurRow){ shared_cols[idx_nextCol] = dst[base_global_idx + t*N+1]; } __syncthreads(); #ifdef CUDA_CUDA_DEBUG // if(threadIdx.y==0){ // printf("blockDimy = %d\n",blockDim.y); // } if(blockIdx.x==1 && t<5){ printf("addr: %d ,%f,\n",idx_nextCol,shared_cols[idx_nextCol]); } #endif #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){ printf("copy cols finish!\n"); } #endif }
056384ea17bff40029a4a6ffe8965c41732a3627.cu
#include "includes.h" extern "C" { } #define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1) #define ROTATE_UP(val,MAX) ((val+1)%MAX) /** * GPU Device kernel for the for 2D stencil * First attempt during hackaton * M = Rows, N = Cols INCLUDING HALOS * In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows */ __global__ void gpu_stencil2D_4pt_hack5_cp_cols(double * dst, double * shared_cols, double *shared_rows,int tile_x,int tile_y, int M, int N){ #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){ printf("copy cols begin!\n"); } #endif int base_global_row = tile_y * blockIdx.y; int base_global_col = tile_x * blockIdx.x; int base_global_idx = N*base_global_row + base_global_col ; int nextCol = base_global_col+1; bool legalNextCol = (nextCol<N); int t = threadIdx.y; int idx = 2*M*blockIdx.x + t + base_global_row; int idx_nextCol = idx + M ; bool legalCurRow = (base_global_row + t)<M; if(legalCurRow){ shared_cols[idx] = dst[base_global_idx + t*N]; } if(legalNextCol && legalCurRow){ shared_cols[idx_nextCol] = dst[base_global_idx + t*N+1]; } __syncthreads(); #ifdef CUDA_CUDA_DEBUG // if(threadIdx.y==0){ // printf("blockDimy = %d\n",blockDim.y); // } if(blockIdx.x==1 && t<5){ printf("addr: %d ,%f,\n",idx_nextCol,shared_cols[idx_nextCol]); } #endif #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.y==0)){ printf("copy cols finish!\n"); } #endif }
07244051d1e7fa344012e258ff16196be9cc46ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <cmath> #include <iostream> #include <vector> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <cuml/cuml.hpp> #include <cuml/tsa/batched_arima.hpp> #include <cuml/tsa/batched_kalman.hpp> #include <raft/cudart_utils.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <common/nvtx.hpp> #include <linalg/batched/matrix.cuh> #include <metrics/batched/information_criterion.cuh> #include <raft/cuda_utils.cuh> #include <raft/linalg/matrix_vector_op.cuh> #include <timeSeries/arima_helpers.cuh> namespace ML { void pack(raft::handle_t& handle, const ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, double* param_vec) { const auto stream = handle.get_stream(); params.pack(order, batch_size, param_vec, stream); } void unpack(raft::handle_t& handle, ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, const double* param_vec) { const auto stream = handle.get_stream(); params.unpack(order, batch_size, param_vec, stream); } void batched_diff(raft::handle_t& handle, double* d_y_diff, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order) { const auto stream = handle.get_stream(); MLCommon::TimeSeries::prepare_data(d_y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream); } void predict(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, int start, int end, const ARIMAOrder& order, const ARIMAParams<double>& params, double* d_y_p, bool pre_diff, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); const auto stream = handle.get_stream(); bool diff = order.need_diff() && pre_diff && level == 0; // Prepare data int n_obs_kf; const double* d_y_kf; MLCommon::device_buffer<double> diff_buffer(allocator, stream); ARIMAOrder order_after_prep = order; if (diff) { n_obs_kf = n_obs - order.n_diff(); diff_buffer.resize(n_obs_kf * batch_size, stream); MLCommon::TimeSeries::prepare_data(diff_buffer.data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream); d_y_kf = diff_buffer.data(); order_after_prep.d = 0; order_after_prep.D = 0; } else { n_obs_kf = n_obs; d_y_kf = d_y; } // Create temporary array for the residuals MLCommon::device_buffer<double> v_buffer(allocator, stream, n_obs_kf * batch_size); double* d_vs = v_buffer.data(); // Create temporary array for the forecasts int num_steps = ::max(end - n_obs, 0); MLCommon::device_buffer<double> fc_buffer(allocator, stream, num_steps * batch_size); double* d_y_fc = fc_buffer.data(); // Compute the residual and forecast std::vector<double> loglike = std::vector<double>(batch_size); /// TODO: use device loglike to avoid useless copy ; part of #2233 batched_loglike(handle, d_y_kf, batch_size, 
n_obs_kf, order_after_prep, params, loglike.data(), d_vs, false, true, MLE, 0, num_steps, d_y_fc, level, d_lower, d_upper); auto counting = thrust::make_counting_iterator(0); int predict_ld = end - start; // // In-sample prediction // int res_offset = diff ? order.d + order.s * order.D : 0; int p_start = ::max(start, res_offset); int p_end = ::min(n_obs, end); // The prediction loop starts by filling undefined predictions with NaN, // then computes the predictions from the observations and residuals if (start < n_obs) { thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_y_p[0] = 0.0; for (int i = 0; i < res_offset - start; i++) { d_y_p[bid * predict_ld + i] = nan(""); } for (int i = p_start; i < p_end; i++) { d_y_p[bid * predict_ld + i - start] = d_y[bid * n_obs + i] - d_vs[bid * n_obs_kf + i - res_offset]; } }); } // // Finalize out-of-sample forecast and copy in-sample predictions // if (num_steps) { if (diff) { MLCommon::TimeSeries::finalize_forecast(d_y_fc, d_y, num_steps, batch_size, n_obs, n_obs, order.d, order.D, order.s, stream); } // Copy forecast in d_y_p thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { for (int i = 0; i < num_steps; i++) { d_y_p[bid * predict_ld + n_obs - start + i] = d_y_fc[num_steps * bid + i]; } }); /// TODO: 2D copy kernel? } ML::POP_RANGE(); } /** * Kernel to compute the sum-of-squares log-likelihood estimation * * @param[in] d_y Series to fit * @param[in] d_mu mu parameters * @param[in] d_ar AR parameters * @param[in] d_ma MA parameters * @param[in] d_sar Seasonal AR parameters * @param[in] d_sma Seasonal MA parameters * @param[out] d_loglike Evaluated log-likelihood * @param[in] n_obs Number of observations in a time series * @param[in] n_phi Number of phi coefficients (combined AR-SAR) * @param[in] n_theta Number of theta coefficients (combined MA-SMA) * @param[in] p Number of AR parameters * @param[in] q Number of MA parameters * @param[in] P Number of seasonal AR parameters * @param[in] Q Number of seasonal MA parameters * @param[in] s Seasonal period or 0 * @param[in] k Whether to use an intercept * @param[in] start_sum At which index to start the sum * @param[in] start_y First used y index (observation) * @param[in] start_v First used v index (residual) */ template <typename DataT> __global__ void sum_of_squares_kernel(const DataT* d_y, const DataT* d_mu, const DataT* d_ar, const DataT* d_ma, const DataT* d_sar, const DataT* d_sma, DataT* d_loglike, int n_obs, int n_phi, int n_theta, int p, int q, int P, int Q, int s, int k, int start_sum, int start_y, int start_v) { // Load phi, theta and mu to registers DataT phi, theta; if (threadIdx.x < n_phi) { phi = MLCommon::TimeSeries::reduced_polynomial<true>( blockIdx.x, d_ar, p, d_sar, P, s, threadIdx.x + 1); } if (threadIdx.x < n_theta) { theta = MLCommon::TimeSeries::reduced_polynomial<false>( blockIdx.x, d_ma, q, d_sma, Q, s, threadIdx.x + 1); } DataT mu = k ? 
d_mu[blockIdx.x] : (DataT)0; // Shared memory: load y and initialize the residuals extern __shared__ DataT shared_mem[]; DataT* b_y = shared_mem; DataT* b_vs = shared_mem + n_obs - start_y; for (int i = threadIdx.x; i < n_obs - start_y; i += blockDim.x) { b_y[i] = d_y[n_obs * blockIdx.x + i + start_y]; } for (int i = threadIdx.x; i < start_sum - start_v; i += blockDim.x) { b_vs[i] = (DataT)0; } // Main loop char* temp_smem = (char*)(shared_mem + 2 * n_obs - start_y - start_v); DataT res, ssq = 0; for (int i = start_sum; i < n_obs; i++) { __syncthreads(); res = (DataT)0; res -= threadIdx.x < n_phi ? phi * b_y[i - threadIdx.x - 1 - start_y] : (DataT)0; res -= threadIdx.x < n_theta ? theta * b_vs[i - threadIdx.x - 1 - start_v] : (DataT)0; res = raft::blockReduce(res, temp_smem); if (threadIdx.x == 0) { res += b_y[i - start_y] - mu; b_vs[i - start_v] = res; ssq += res * res; } } // Compute log-likelihood and write it to global memory if (threadIdx.x == 0) { d_loglike[blockIdx.x] = -0.5 * static_cast<DataT>(n_obs) * raft::myLog(ssq / static_cast<DataT>(n_obs - start_sum)); } } /** * Sum-of-squares estimation method * * @param[in] handle cuML handle * @param[in] d_y Series to fit: shape = (n_obs, batch_size) * @param[in] batch_size Number of time series * @param[in] n_obs Number of observations in a time series * @param[in] order ARIMA hyper-parameters * @param[in] Tparams Transformed parameters * @param[out] d_loglike Evaluated log-likelihood (device) * @param[in] truncate Number of observations to skip in the sum */ void conditional_sum_of_squares(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& Tparams, double* d_loglike, int truncate) { ML::PUSH_RANGE(__func__); auto stream = handle.get_stream(); int n_phi = order.n_phi(); int n_theta = order.n_theta(); int max_lags = ::max(n_phi, n_theta); int start_sum = ::max(max_lags, truncate); int start_y = start_sum - n_phi; int start_v = start_sum - n_theta; // Compute the sum-of-squares and the log-likelihood int n_warps = ::max(raft::ceildiv<int>(max_lags, 32), 1); size_t shared_mem_size = (2 * n_obs - start_y - start_v + n_warps) * sizeof(double); hipLaunchKernelGGL(( sum_of_squares_kernel), dim3(batch_size), dim3(32 * n_warps), shared_mem_size, stream, d_y, Tparams.mu, Tparams.ar, Tparams.ma, Tparams.sar, Tparams.sma, d_loglike, n_obs, n_phi, n_theta, order.p, order.q, order.P, order.Q, order.s, order.k, start_sum, start_y, start_v); CUDA_CHECK(hipPeekAtLastError()); ML::POP_RANGE(); } void batched_loglike(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, double* loglike, double* d_vs, bool trans, bool host_loglike, LoglikeMethod method, int truncate, int fc_steps, double* d_fc, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); ARIMAParams<double> Tparams; ASSERT(method == MLE || fc_steps == 0, "Only MLE method is valid for forecasting"); /* Create log-likelihood device array if host pointer is provided */ double* d_loglike; MLCommon::device_buffer<double> loglike_buffer(allocator, stream); if (host_loglike) { loglike_buffer.resize(batch_size, stream); d_loglike = loglike_buffer.data(); } else { d_loglike = loglike; } if (trans) { Tparams.allocate(order, batch_size, allocator, stream, true); MLCommon::TimeSeries::batched_jones_transform( order, batch_size, false, params, Tparams, 
allocator, stream); Tparams.mu = params.mu; } else { // non-transformed case: just use original parameters Tparams = params; } if (method == CSS) { conditional_sum_of_squares(handle, d_y, batch_size, n_obs, order, Tparams, d_loglike, truncate); } else { batched_kalman_filter(handle, d_y, n_obs, Tparams, order, batch_size, d_loglike, d_vs, fc_steps, d_fc, level, d_lower, d_upper); } if (host_loglike) { /* Tranfer log-likelihood device -> host */ raft::update_host(loglike, d_loglike, batch_size, stream); } if (trans) { Tparams.deallocate(order, batch_size, allocator, stream, true); } ML::POP_RANGE(); } void batched_loglike(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_params, double* loglike, double* d_vs, bool trans, bool host_loglike, LoglikeMethod method, int truncate, int fc_steps, double* d_fc, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); // unpack parameters auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); ARIMAParams<double> params; params.allocate(order, batch_size, allocator, stream, false); params.unpack(order, batch_size, d_params, stream); batched_loglike(handle, d_y, batch_size, n_obs, order, params, loglike, d_vs, trans, host_loglike, method, truncate, fc_steps, d_fc, level, d_lower, d_upper); params.deallocate(order, batch_size, allocator, stream, false); ML::POP_RANGE(); } void batched_loglike_grad(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_x, double* d_grad, double h, bool trans, LoglikeMethod method, int truncate) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); auto counting = thrust::make_counting_iterator(0); int N = order.complexity(); // Initialize the perturbed x vector MLCommon::device_buffer<double> x_pert(allocator, stream, N * batch_size); double* d_x_pert = x_pert.data(); raft::copy(d_x_pert, d_x, N * batch_size, stream); // Create buffers for the log-likelihood and residuals MLCommon::device_buffer<double> ll_base(allocator, stream, batch_size); MLCommon::device_buffer<double> ll_pert(allocator, stream, batch_size); MLCommon::device_buffer<double> res(allocator, stream, n_obs * batch_size); double* d_ll_base = ll_base.data(); double* d_ll_pert = ll_pert.data(); // Evaluate the log-likelihood with the given parameter vector batched_loglike(handle, d_y, batch_size, n_obs, order, d_x, d_ll_base, res.data(), trans, false, method, truncate); for (int i = 0; i < N; i++) { // Add the perturbation to the i-th parameter thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_x_pert[N * bid + i] = d_x[N * bid + i] + h; }); // Evaluate the log-likelihood with the positive perturbation batched_loglike(handle, d_y, batch_size, n_obs, order, d_x_pert, d_ll_pert, res.data(), trans, false, method, truncate); // First derivative with a first-order accuracy thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_grad[N * bid + i] = (d_ll_pert[bid] - d_ll_base[bid]) / h; }); // Reset the i-th parameter thrust::for_each( thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_x_pert[N * bid + i] = d_x[N * bid + i]; }); } ML::POP_RANGE(); } void information_criterion(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, 
double* d_ic, int ic_type) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); MLCommon::device_buffer<double> v_buffer(allocator, stream, n_obs * batch_size); /* Compute log-likelihood in d_ic */ batched_loglike(handle, d_y, batch_size, n_obs, order, params, d_ic, v_buffer.data(), false, false, MLE); /* Compute information criterion from log-likelihood and base term */ MLCommon::Metrics::Batched::information_criterion( d_ic, d_ic, static_cast<MLCommon::Metrics::IC_Type>(ic_type), order.complexity(), batch_size, n_obs - order.n_diff(), stream); ML::POP_RANGE(); } /** * Test that the parameters are valid for the inverse transform * * @tparam isAr Are these (S)AR or (S)MA parameters? * @param[in] params Parameters * @param[in] pq p for AR, q for MA, P for SAR, Q for SMA */ template <bool isAr> DI bool test_invparams(const double* params, int pq) { double new_params[4]; double tmp[4]; constexpr double coef = isAr ? 1 : -1; for (int i = 0; i < pq; i++) { tmp[i] = params[i]; new_params[i] = tmp[i]; } // Perform inverse transform and stop before atanh step for (int j = pq - 1; j > 0; --j) { double a = new_params[j]; for (int k = 0; k < j; ++k) { tmp[k] = (new_params[k] + coef * a * new_params[j - k - 1]) / (1 - (a * a)); } for (int iter = 0; iter < j; ++iter) { new_params[iter] = tmp[iter]; } } // Verify that the values are between -1 and 1 bool result = true; for (int i = 0; i < pq; i++) { result = result && !(new_params[i] <= -1 || new_params[i] >= 1); } return result; } /** * Auxiliary function of _start_params: least square approximation of an * ARMA model (with or without seasonality) * @note: in this function the non-seasonal case has s=1, not s=0! */ void _arma_least_squares(raft::handle_t& handle, double* d_ar, double* d_ma, double* d_sigma2, const MLCommon::LinAlg::Batched::Matrix<double>& bm_y, int p, int q, int s, bool estimate_sigma2, int k = 0, double* d_mu = nullptr) { const auto& handle_impl = handle; auto stream = handle_impl.get_stream(); auto cublas_handle = handle_impl.get_cublas_handle(); auto allocator = handle_impl.get_device_allocator(); auto counting = thrust::make_counting_iterator(0); int batch_size = bm_y.batches(); int n_obs = bm_y.shape().first; int ps = p * s, qs = q * s; int p_ar = ::max(ps, 2 * qs); int r = ::max(p_ar + qs, ps); if ((q && p_ar >= n_obs - p_ar) || p + q + k >= n_obs - r) { // Too few observations for the estimate, fill with 0 (1 for sigma2) if (k) CUDA_CHECK(hipMemsetAsync(d_mu, 0, sizeof(double) * batch_size, stream)); if (p) CUDA_CHECK( hipMemsetAsync(d_ar, 0, sizeof(double) * p * batch_size, stream)); if (q) CUDA_CHECK( hipMemsetAsync(d_ma, 0, sizeof(double) * q * batch_size, stream)); if (estimate_sigma2) { thrust::device_ptr<double> sigma2_thrust = thrust::device_pointer_cast(d_sigma2); thrust::fill(thrust::hip::par.on(stream), sigma2_thrust, sigma2_thrust + batch_size, 1.0); } return; } /* Matrix formed by lag matrices of y and the residuals respectively, * side by side. 
The left side will be used to estimate AR, the right * side to estimate MA */ MLCommon::LinAlg::Batched::Matrix<double> bm_ls_ar_res( n_obs - r, p + q + k, batch_size, cublas_handle, allocator, stream, false); int ar_offset = r - ps; int res_offset = r - p_ar - qs; // Get residuals from an AR(p_ar) model to estimate the MA parameters if (q) { // Create lagged y int ls_height = n_obs - p_ar; MLCommon::LinAlg::Batched::Matrix<double> bm_ls = MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, p_ar); /* Matrix for the initial AR fit, initialized by copy of y * (note: this is because gels works in-place ; the matrix has larger * dimensions than the actual AR fit) */ MLCommon::LinAlg::Batched::Matrix<double> bm_ar_fit = MLCommon::LinAlg::Batched::b_2dcopy(bm_y, p_ar, 0, ls_height, 1); // Residual, initialized as offset y to avoid one kernel call MLCommon::LinAlg::Batched::Matrix<double> bm_residual(bm_ar_fit); // Initial AR fit MLCommon::LinAlg::Batched::b_gels(bm_ls, bm_ar_fit); // Compute residual (technically a gemv) MLCommon::LinAlg::Batched::b_gemm(false, false, ls_height, 1, p_ar, -1.0, bm_ls, bm_ar_fit, 1.0, bm_residual); // Lags of the residual MLCommon::LinAlg::Batched::b_lagged_mat(bm_residual, bm_ls_ar_res, q, n_obs - r, res_offset, (n_obs - r) * (k + p), s); } // Fill the first column of the matrix with 1 if we fit an intercept if (k) { double* d_ls_ar_res = bm_ls_ar_res.raw_data(); thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { double* b_ls_ar_res = d_ls_ar_res + bid * (n_obs - r) * (p + q + k); for (int i = 0; i < n_obs - r; i++) { b_ls_ar_res[i] = 1.0; } }); } // Lags of y MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, bm_ls_ar_res, p, n_obs - r, ar_offset, (n_obs - r) * k, s); /* Initializing the vector for the ARMA fit * (note: also in-place as described for AR fit) */ MLCommon::LinAlg::Batched::Matrix<double> bm_arma_fit = MLCommon::LinAlg::Batched::b_2dcopy(bm_y, r, 0, n_obs - r, 1); // The residuals will be computed only if sigma2 is requested MLCommon::LinAlg::Batched::Matrix<double> bm_final_residual( n_obs - r, 1, batch_size, cublas_handle, allocator, stream, false); if (estimate_sigma2) { raft::copy(bm_final_residual.raw_data(), bm_arma_fit.raw_data(), (n_obs - r) * batch_size, stream); } // ARMA fit MLCommon::LinAlg::Batched::b_gels(bm_ls_ar_res, bm_arma_fit); // Copy the results in the parameter vectors const double* d_arma_fit = bm_arma_fit.raw_data(); thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { const double* b_arma_fit = d_arma_fit + bid * (n_obs - r); if (k) { d_mu[bid] = b_arma_fit[0]; } if (p) { double* b_ar = d_ar + bid * p; for (int i = 0; i < p; i++) { b_ar[i] = b_arma_fit[i + k]; } } if (q) { double* b_ma = d_ma + bid * q; for (int i = 0; i < q; i++) { b_ma[i] = b_arma_fit[i + p + k]; } } }); if (estimate_sigma2) { // Compute final residual (technically a gemv) MLCommon::LinAlg::Batched::b_gemm(false, false, n_obs - r, 1, p + q + k, -1.0, bm_ls_ar_res, bm_arma_fit, 1.0, bm_final_residual); // Compute variance double* d_residual = bm_final_residual.raw_data(); thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { double acc = 0.0; const double* b_residual = d_residual + (n_obs - r) * bid; for (int i = q; i < n_obs - r; i++) { double res = b_residual[i]; acc += res * res; } d_sigma2[bid] = acc / static_cast<double>(n_obs - r - q); }); } // If (S)AR or (S)MA are not valid for the inverse transform, set 
them to zero thrust::for_each(thrust::hip::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { if (p) { double* b_ar = d_ar + bid * p; bool valid = test_invparams<true>(b_ar, p); if (!valid) { for (int ip = 0; ip < p; ip++) b_ar[ip] = 0; } } if (q) { double* b_ma = d_ma + bid * q; bool valid = test_invparams<false>(b_ma, q); if (!valid) { for (int iq = 0; iq < q; iq++) b_ma[iq] = 0; } } }); } /** * Auxiliary function of estimate_x0: compute the starting parameters for * the series pre-processed by estimate_x0 */ void _start_params(raft::handle_t& handle, ARIMAParams<double>& params, const MLCommon::LinAlg::Batched::Matrix<double>& bm_y, const ARIMAOrder& order) { // Estimate an ARMA fit without seasonality if (order.p + order.q + order.k) _arma_least_squares(handle, params.ar, params.ma, params.sigma2, bm_y, order.p, order.q, 1, true, order.k, params.mu); // Estimate a seasonal ARMA fit independantly if (order.P + order.Q) _arma_least_squares(handle, params.sar, params.sma, params.sigma2, bm_y, order.P, order.Q, order.s, order.p + order.q + order.k == 0); } void estimate_x0(raft::handle_t& handle, ARIMAParams<double>& params, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order) { ML::PUSH_RANGE(__func__); const auto& handle_impl = handle; auto stream = handle_impl.get_stream(); auto cublas_handle = handle_impl.get_cublas_handle(); auto allocator = handle_impl.get_device_allocator(); // Difference if necessary, copy otherwise MLCommon::LinAlg::Batched::Matrix<double> bm_yd( n_obs - order.d - order.s * order.D, 1, batch_size, cublas_handle, allocator, stream, false); MLCommon::TimeSeries::prepare_data(bm_yd.raw_data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream); // Do the computation of the initial parameters _start_params(handle, params, bm_yd, order); ML::POP_RANGE(); } } // namespace ML
07244051d1e7fa344012e258ff16196be9cc46ef.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <cmath> #include <iostream> #include <vector> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <cuml/cuml.hpp> #include <cuml/tsa/batched_arima.hpp> #include <cuml/tsa/batched_kalman.hpp> #include <raft/cudart_utils.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <common/nvtx.hpp> #include <linalg/batched/matrix.cuh> #include <metrics/batched/information_criterion.cuh> #include <raft/cuda_utils.cuh> #include <raft/linalg/matrix_vector_op.cuh> #include <timeSeries/arima_helpers.cuh> namespace ML { void pack(raft::handle_t& handle, const ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, double* param_vec) { const auto stream = handle.get_stream(); params.pack(order, batch_size, param_vec, stream); } void unpack(raft::handle_t& handle, ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, const double* param_vec) { const auto stream = handle.get_stream(); params.unpack(order, batch_size, param_vec, stream); } void batched_diff(raft::handle_t& handle, double* d_y_diff, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order) { const auto stream = handle.get_stream(); MLCommon::TimeSeries::prepare_data(d_y_diff, d_y, batch_size, n_obs, order.d, order.D, order.s, stream); } void predict(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, int start, int end, const ARIMAOrder& order, const ARIMAParams<double>& params, double* d_y_p, bool pre_diff, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); const auto stream = handle.get_stream(); bool diff = order.need_diff() && pre_diff && level == 0; // Prepare data int n_obs_kf; const double* d_y_kf; MLCommon::device_buffer<double> diff_buffer(allocator, stream); ARIMAOrder order_after_prep = order; if (diff) { n_obs_kf = n_obs - order.n_diff(); diff_buffer.resize(n_obs_kf * batch_size, stream); MLCommon::TimeSeries::prepare_data(diff_buffer.data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream); d_y_kf = diff_buffer.data(); order_after_prep.d = 0; order_after_prep.D = 0; } else { n_obs_kf = n_obs; d_y_kf = d_y; } // Create temporary array for the residuals MLCommon::device_buffer<double> v_buffer(allocator, stream, n_obs_kf * batch_size); double* d_vs = v_buffer.data(); // Create temporary array for the forecasts int num_steps = std::max(end - n_obs, 0); MLCommon::device_buffer<double> fc_buffer(allocator, stream, num_steps * batch_size); double* d_y_fc = fc_buffer.data(); // Compute the residual and forecast std::vector<double> loglike = std::vector<double>(batch_size); /// TODO: use device loglike to avoid useless copy ; part of #2233 batched_loglike(handle, d_y_kf, batch_size, n_obs_kf, order_after_prep, params, loglike.data(), d_vs, false, true, MLE, 0, 
num_steps, d_y_fc, level, d_lower, d_upper); auto counting = thrust::make_counting_iterator(0); int predict_ld = end - start; // // In-sample prediction // int res_offset = diff ? order.d + order.s * order.D : 0; int p_start = std::max(start, res_offset); int p_end = std::min(n_obs, end); // The prediction loop starts by filling undefined predictions with NaN, // then computes the predictions from the observations and residuals if (start < n_obs) { thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_y_p[0] = 0.0; for (int i = 0; i < res_offset - start; i++) { d_y_p[bid * predict_ld + i] = nan(""); } for (int i = p_start; i < p_end; i++) { d_y_p[bid * predict_ld + i - start] = d_y[bid * n_obs + i] - d_vs[bid * n_obs_kf + i - res_offset]; } }); } // // Finalize out-of-sample forecast and copy in-sample predictions // if (num_steps) { if (diff) { MLCommon::TimeSeries::finalize_forecast(d_y_fc, d_y, num_steps, batch_size, n_obs, n_obs, order.d, order.D, order.s, stream); } // Copy forecast in d_y_p thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { for (int i = 0; i < num_steps; i++) { d_y_p[bid * predict_ld + n_obs - start + i] = d_y_fc[num_steps * bid + i]; } }); /// TODO: 2D copy kernel? } ML::POP_RANGE(); } /** * Kernel to compute the sum-of-squares log-likelihood estimation * * @param[in] d_y Series to fit * @param[in] d_mu mu parameters * @param[in] d_ar AR parameters * @param[in] d_ma MA parameters * @param[in] d_sar Seasonal AR parameters * @param[in] d_sma Seasonal MA parameters * @param[out] d_loglike Evaluated log-likelihood * @param[in] n_obs Number of observations in a time series * @param[in] n_phi Number of phi coefficients (combined AR-SAR) * @param[in] n_theta Number of theta coefficients (combined MA-SMA) * @param[in] p Number of AR parameters * @param[in] q Number of MA parameters * @param[in] P Number of seasonal AR parameters * @param[in] Q Number of seasonal MA parameters * @param[in] s Seasonal period or 0 * @param[in] k Whether to use an intercept * @param[in] start_sum At which index to start the sum * @param[in] start_y First used y index (observation) * @param[in] start_v First used v index (residual) */ template <typename DataT> __global__ void sum_of_squares_kernel(const DataT* d_y, const DataT* d_mu, const DataT* d_ar, const DataT* d_ma, const DataT* d_sar, const DataT* d_sma, DataT* d_loglike, int n_obs, int n_phi, int n_theta, int p, int q, int P, int Q, int s, int k, int start_sum, int start_y, int start_v) { // Load phi, theta and mu to registers DataT phi, theta; if (threadIdx.x < n_phi) { phi = MLCommon::TimeSeries::reduced_polynomial<true>( blockIdx.x, d_ar, p, d_sar, P, s, threadIdx.x + 1); } if (threadIdx.x < n_theta) { theta = MLCommon::TimeSeries::reduced_polynomial<false>( blockIdx.x, d_ma, q, d_sma, Q, s, threadIdx.x + 1); } DataT mu = k ? d_mu[blockIdx.x] : (DataT)0; // Shared memory: load y and initialize the residuals extern __shared__ DataT shared_mem[]; DataT* b_y = shared_mem; DataT* b_vs = shared_mem + n_obs - start_y; for (int i = threadIdx.x; i < n_obs - start_y; i += blockDim.x) { b_y[i] = d_y[n_obs * blockIdx.x + i + start_y]; } for (int i = threadIdx.x; i < start_sum - start_v; i += blockDim.x) { b_vs[i] = (DataT)0; } // Main loop char* temp_smem = (char*)(shared_mem + 2 * n_obs - start_y - start_v); DataT res, ssq = 0; for (int i = start_sum; i < n_obs; i++) { __syncthreads(); res = (DataT)0; res -= threadIdx.x < n_phi ? 
phi * b_y[i - threadIdx.x - 1 - start_y] : (DataT)0; res -= threadIdx.x < n_theta ? theta * b_vs[i - threadIdx.x - 1 - start_v] : (DataT)0; res = raft::blockReduce(res, temp_smem); if (threadIdx.x == 0) { res += b_y[i - start_y] - mu; b_vs[i - start_v] = res; ssq += res * res; } } // Compute log-likelihood and write it to global memory if (threadIdx.x == 0) { d_loglike[blockIdx.x] = -0.5 * static_cast<DataT>(n_obs) * raft::myLog(ssq / static_cast<DataT>(n_obs - start_sum)); } } /** * Sum-of-squares estimation method * * @param[in] handle cuML handle * @param[in] d_y Series to fit: shape = (n_obs, batch_size) * @param[in] batch_size Number of time series * @param[in] n_obs Number of observations in a time series * @param[in] order ARIMA hyper-parameters * @param[in] Tparams Transformed parameters * @param[out] d_loglike Evaluated log-likelihood (device) * @param[in] truncate Number of observations to skip in the sum */ void conditional_sum_of_squares(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& Tparams, double* d_loglike, int truncate) { ML::PUSH_RANGE(__func__); auto stream = handle.get_stream(); int n_phi = order.n_phi(); int n_theta = order.n_theta(); int max_lags = std::max(n_phi, n_theta); int start_sum = std::max(max_lags, truncate); int start_y = start_sum - n_phi; int start_v = start_sum - n_theta; // Compute the sum-of-squares and the log-likelihood int n_warps = std::max(raft::ceildiv<int>(max_lags, 32), 1); size_t shared_mem_size = (2 * n_obs - start_y - start_v + n_warps) * sizeof(double); sum_of_squares_kernel<<<batch_size, 32 * n_warps, shared_mem_size, stream>>>( d_y, Tparams.mu, Tparams.ar, Tparams.ma, Tparams.sar, Tparams.sma, d_loglike, n_obs, n_phi, n_theta, order.p, order.q, order.P, order.Q, order.s, order.k, start_sum, start_y, start_v); CUDA_CHECK(cudaPeekAtLastError()); ML::POP_RANGE(); } void batched_loglike(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, double* loglike, double* d_vs, bool trans, bool host_loglike, LoglikeMethod method, int truncate, int fc_steps, double* d_fc, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); ARIMAParams<double> Tparams; ASSERT(method == MLE || fc_steps == 0, "Only MLE method is valid for forecasting"); /* Create log-likelihood device array if host pointer is provided */ double* d_loglike; MLCommon::device_buffer<double> loglike_buffer(allocator, stream); if (host_loglike) { loglike_buffer.resize(batch_size, stream); d_loglike = loglike_buffer.data(); } else { d_loglike = loglike; } if (trans) { Tparams.allocate(order, batch_size, allocator, stream, true); MLCommon::TimeSeries::batched_jones_transform( order, batch_size, false, params, Tparams, allocator, stream); Tparams.mu = params.mu; } else { // non-transformed case: just use original parameters Tparams = params; } if (method == CSS) { conditional_sum_of_squares(handle, d_y, batch_size, n_obs, order, Tparams, d_loglike, truncate); } else { batched_kalman_filter(handle, d_y, n_obs, Tparams, order, batch_size, d_loglike, d_vs, fc_steps, d_fc, level, d_lower, d_upper); } if (host_loglike) { /* Tranfer log-likelihood device -> host */ raft::update_host(loglike, d_loglike, batch_size, stream); } if (trans) { Tparams.deallocate(order, batch_size, allocator, stream, true); } ML::POP_RANGE(); } void 
batched_loglike(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_params, double* loglike, double* d_vs, bool trans, bool host_loglike, LoglikeMethod method, int truncate, int fc_steps, double* d_fc, double level, double* d_lower, double* d_upper) { ML::PUSH_RANGE(__func__); // unpack parameters auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); ARIMAParams<double> params; params.allocate(order, batch_size, allocator, stream, false); params.unpack(order, batch_size, d_params, stream); batched_loglike(handle, d_y, batch_size, n_obs, order, params, loglike, d_vs, trans, host_loglike, method, truncate, fc_steps, d_fc, level, d_lower, d_upper); params.deallocate(order, batch_size, allocator, stream, false); ML::POP_RANGE(); } void batched_loglike_grad(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_x, double* d_grad, double h, bool trans, LoglikeMethod method, int truncate) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); auto counting = thrust::make_counting_iterator(0); int N = order.complexity(); // Initialize the perturbed x vector MLCommon::device_buffer<double> x_pert(allocator, stream, N * batch_size); double* d_x_pert = x_pert.data(); raft::copy(d_x_pert, d_x, N * batch_size, stream); // Create buffers for the log-likelihood and residuals MLCommon::device_buffer<double> ll_base(allocator, stream, batch_size); MLCommon::device_buffer<double> ll_pert(allocator, stream, batch_size); MLCommon::device_buffer<double> res(allocator, stream, n_obs * batch_size); double* d_ll_base = ll_base.data(); double* d_ll_pert = ll_pert.data(); // Evaluate the log-likelihood with the given parameter vector batched_loglike(handle, d_y, batch_size, n_obs, order, d_x, d_ll_base, res.data(), trans, false, method, truncate); for (int i = 0; i < N; i++) { // Add the perturbation to the i-th parameter thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_x_pert[N * bid + i] = d_x[N * bid + i] + h; }); // Evaluate the log-likelihood with the positive perturbation batched_loglike(handle, d_y, batch_size, n_obs, order, d_x_pert, d_ll_pert, res.data(), trans, false, method, truncate); // First derivative with a first-order accuracy thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_grad[N * bid + i] = (d_ll_pert[bid] - d_ll_base[bid]) / h; }); // Reset the i-th parameter thrust::for_each( thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { d_x_pert[N * bid + i] = d_x[N * bid + i]; }); } ML::POP_RANGE(); } void information_criterion(raft::handle_t& handle, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, double* d_ic, int ic_type) { ML::PUSH_RANGE(__func__); auto allocator = handle.get_device_allocator(); auto stream = handle.get_stream(); MLCommon::device_buffer<double> v_buffer(allocator, stream, n_obs * batch_size); /* Compute log-likelihood in d_ic */ batched_loglike(handle, d_y, batch_size, n_obs, order, params, d_ic, v_buffer.data(), false, false, MLE); /* Compute information criterion from log-likelihood and base term */ MLCommon::Metrics::Batched::information_criterion( d_ic, d_ic, static_cast<MLCommon::Metrics::IC_Type>(ic_type), order.complexity(), batch_size, n_obs - order.n_diff(), stream); 
ML::POP_RANGE(); } /** * Test that the parameters are valid for the inverse transform * * @tparam isAr Are these (S)AR or (S)MA parameters? * @param[in] params Parameters * @param[in] pq p for AR, q for MA, P for SAR, Q for SMA */ template <bool isAr> DI bool test_invparams(const double* params, int pq) { double new_params[4]; double tmp[4]; constexpr double coef = isAr ? 1 : -1; for (int i = 0; i < pq; i++) { tmp[i] = params[i]; new_params[i] = tmp[i]; } // Perform inverse transform and stop before atanh step for (int j = pq - 1; j > 0; --j) { double a = new_params[j]; for (int k = 0; k < j; ++k) { tmp[k] = (new_params[k] + coef * a * new_params[j - k - 1]) / (1 - (a * a)); } for (int iter = 0; iter < j; ++iter) { new_params[iter] = tmp[iter]; } } // Verify that the values are between -1 and 1 bool result = true; for (int i = 0; i < pq; i++) { result = result && !(new_params[i] <= -1 || new_params[i] >= 1); } return result; } /** * Auxiliary function of _start_params: least square approximation of an * ARMA model (with or without seasonality) * @note: in this function the non-seasonal case has s=1, not s=0! */ void _arma_least_squares(raft::handle_t& handle, double* d_ar, double* d_ma, double* d_sigma2, const MLCommon::LinAlg::Batched::Matrix<double>& bm_y, int p, int q, int s, bool estimate_sigma2, int k = 0, double* d_mu = nullptr) { const auto& handle_impl = handle; auto stream = handle_impl.get_stream(); auto cublas_handle = handle_impl.get_cublas_handle(); auto allocator = handle_impl.get_device_allocator(); auto counting = thrust::make_counting_iterator(0); int batch_size = bm_y.batches(); int n_obs = bm_y.shape().first; int ps = p * s, qs = q * s; int p_ar = std::max(ps, 2 * qs); int r = std::max(p_ar + qs, ps); if ((q && p_ar >= n_obs - p_ar) || p + q + k >= n_obs - r) { // Too few observations for the estimate, fill with 0 (1 for sigma2) if (k) CUDA_CHECK(cudaMemsetAsync(d_mu, 0, sizeof(double) * batch_size, stream)); if (p) CUDA_CHECK( cudaMemsetAsync(d_ar, 0, sizeof(double) * p * batch_size, stream)); if (q) CUDA_CHECK( cudaMemsetAsync(d_ma, 0, sizeof(double) * q * batch_size, stream)); if (estimate_sigma2) { thrust::device_ptr<double> sigma2_thrust = thrust::device_pointer_cast(d_sigma2); thrust::fill(thrust::cuda::par.on(stream), sigma2_thrust, sigma2_thrust + batch_size, 1.0); } return; } /* Matrix formed by lag matrices of y and the residuals respectively, * side by side. 
The left side will be used to estimate AR, the right * side to estimate MA */ MLCommon::LinAlg::Batched::Matrix<double> bm_ls_ar_res( n_obs - r, p + q + k, batch_size, cublas_handle, allocator, stream, false); int ar_offset = r - ps; int res_offset = r - p_ar - qs; // Get residuals from an AR(p_ar) model to estimate the MA parameters if (q) { // Create lagged y int ls_height = n_obs - p_ar; MLCommon::LinAlg::Batched::Matrix<double> bm_ls = MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, p_ar); /* Matrix for the initial AR fit, initialized by copy of y * (note: this is because gels works in-place ; the matrix has larger * dimensions than the actual AR fit) */ MLCommon::LinAlg::Batched::Matrix<double> bm_ar_fit = MLCommon::LinAlg::Batched::b_2dcopy(bm_y, p_ar, 0, ls_height, 1); // Residual, initialized as offset y to avoid one kernel call MLCommon::LinAlg::Batched::Matrix<double> bm_residual(bm_ar_fit); // Initial AR fit MLCommon::LinAlg::Batched::b_gels(bm_ls, bm_ar_fit); // Compute residual (technically a gemv) MLCommon::LinAlg::Batched::b_gemm(false, false, ls_height, 1, p_ar, -1.0, bm_ls, bm_ar_fit, 1.0, bm_residual); // Lags of the residual MLCommon::LinAlg::Batched::b_lagged_mat(bm_residual, bm_ls_ar_res, q, n_obs - r, res_offset, (n_obs - r) * (k + p), s); } // Fill the first column of the matrix with 1 if we fit an intercept if (k) { double* d_ls_ar_res = bm_ls_ar_res.raw_data(); thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { double* b_ls_ar_res = d_ls_ar_res + bid * (n_obs - r) * (p + q + k); for (int i = 0; i < n_obs - r; i++) { b_ls_ar_res[i] = 1.0; } }); } // Lags of y MLCommon::LinAlg::Batched::b_lagged_mat(bm_y, bm_ls_ar_res, p, n_obs - r, ar_offset, (n_obs - r) * k, s); /* Initializing the vector for the ARMA fit * (note: also in-place as described for AR fit) */ MLCommon::LinAlg::Batched::Matrix<double> bm_arma_fit = MLCommon::LinAlg::Batched::b_2dcopy(bm_y, r, 0, n_obs - r, 1); // The residuals will be computed only if sigma2 is requested MLCommon::LinAlg::Batched::Matrix<double> bm_final_residual( n_obs - r, 1, batch_size, cublas_handle, allocator, stream, false); if (estimate_sigma2) { raft::copy(bm_final_residual.raw_data(), bm_arma_fit.raw_data(), (n_obs - r) * batch_size, stream); } // ARMA fit MLCommon::LinAlg::Batched::b_gels(bm_ls_ar_res, bm_arma_fit); // Copy the results in the parameter vectors const double* d_arma_fit = bm_arma_fit.raw_data(); thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { const double* b_arma_fit = d_arma_fit + bid * (n_obs - r); if (k) { d_mu[bid] = b_arma_fit[0]; } if (p) { double* b_ar = d_ar + bid * p; for (int i = 0; i < p; i++) { b_ar[i] = b_arma_fit[i + k]; } } if (q) { double* b_ma = d_ma + bid * q; for (int i = 0; i < q; i++) { b_ma[i] = b_arma_fit[i + p + k]; } } }); if (estimate_sigma2) { // Compute final residual (technically a gemv) MLCommon::LinAlg::Batched::b_gemm(false, false, n_obs - r, 1, p + q + k, -1.0, bm_ls_ar_res, bm_arma_fit, 1.0, bm_final_residual); // Compute variance double* d_residual = bm_final_residual.raw_data(); thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { double acc = 0.0; const double* b_residual = d_residual + (n_obs - r) * bid; for (int i = q; i < n_obs - r; i++) { double res = b_residual[i]; acc += res * res; } d_sigma2[bid] = acc / static_cast<double>(n_obs - r - q); }); } // If (S)AR or (S)MA are not valid for the inverse transform, 
set them to zero thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) { if (p) { double* b_ar = d_ar + bid * p; bool valid = test_invparams<true>(b_ar, p); if (!valid) { for (int ip = 0; ip < p; ip++) b_ar[ip] = 0; } } if (q) { double* b_ma = d_ma + bid * q; bool valid = test_invparams<false>(b_ma, q); if (!valid) { for (int iq = 0; iq < q; iq++) b_ma[iq] = 0; } } }); } /** * Auxiliary function of estimate_x0: compute the starting parameters for * the series pre-processed by estimate_x0 */ void _start_params(raft::handle_t& handle, ARIMAParams<double>& params, const MLCommon::LinAlg::Batched::Matrix<double>& bm_y, const ARIMAOrder& order) { // Estimate an ARMA fit without seasonality if (order.p + order.q + order.k) _arma_least_squares(handle, params.ar, params.ma, params.sigma2, bm_y, order.p, order.q, 1, true, order.k, params.mu); // Estimate a seasonal ARMA fit independantly if (order.P + order.Q) _arma_least_squares(handle, params.sar, params.sma, params.sigma2, bm_y, order.P, order.Q, order.s, order.p + order.q + order.k == 0); } void estimate_x0(raft::handle_t& handle, ARIMAParams<double>& params, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order) { ML::PUSH_RANGE(__func__); const auto& handle_impl = handle; auto stream = handle_impl.get_stream(); auto cublas_handle = handle_impl.get_cublas_handle(); auto allocator = handle_impl.get_device_allocator(); // Difference if necessary, copy otherwise MLCommon::LinAlg::Batched::Matrix<double> bm_yd( n_obs - order.d - order.s * order.D, 1, batch_size, cublas_handle, allocator, stream, false); MLCommon::TimeSeries::prepare_data(bm_yd.raw_data(), d_y, batch_size, n_obs, order.d, order.D, order.s, stream); // Do the computation of the initial parameters _start_params(handle, params, bm_yd, order); ML::POP_RANGE(); } } // namespace ML
fd1fa3506f60d8a3c5505f8eccb32b8dc6c0006b.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <torch/serialize/tensor.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <ATen/hip/HIPContext.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return ::max( ::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / 
static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlignForwardCUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] { hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output; } at::Tensor ROIAlignBackwardCUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { 
AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] { hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>) , dim3(ROI_GET_BLOCKS(count)), dim3(ROI_CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(hipGetLastError() == hipSuccess); return grad_in; }
fd1fa3506f60d8a3c5505f8eccb32b8dc6c0006b.cu
#include <ATen/ATen.h> #include <torch/serialize/tensor.h> #include <cuda.h> #include <cuda_runtime.h> #include <ATen/cuda/CUDAContext.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return std::max( std::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * 
pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlignForwardCUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] { RoIAlignForwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output; } at::Tensor ROIAlignBackwardCUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, pooled_height, 
pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] { RoIAlignBackwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return grad_in; }
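For checking the RoIAlign kernels above on small inputs, a plain CPU version of the same bilinear sampling is handy. The sketch below is illustrative only and not part of the extension source; it mirrors the out-of-range, clamping and weight rules of bilinear_interpolate_gradient.

// CPU reference of the bilinear sampling used by the RoIAlign kernels above
// (same boundary handling and w1..w4 weights as bilinear_interpolate_gradient).
template <typename T>
T bilinear_interpolate_cpu(const T* data, int height, int width, T y, T x) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) return 0; // outside the feature map
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;
  int y_low = (int)y, x_low = (int)x, y_high, x_high;
  if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; }
  if (x_low >= width - 1)  { x_high = x_low = width - 1;  x = (T)x_low; } else { x_high = x_low + 1; }
  T ly = y - y_low, lx = x - x_low, hy = 1 - ly, hx = 1 - lx;
  // hy*hx, hy*lx, ly*hx, ly*lx are exactly the weights returned by the gradient helper
  return hy * hx * data[y_low * width + x_low]  + hy * lx * data[y_low * width + x_high]
       + ly * hx * data[y_high * width + x_low] + ly * lx * data[y_high * width + x_high];
}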
5463a85239124114ae0fce6bdd80f05a90595dd0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

#define BLOCK_WIDTH 1000
#define TOTAL_THREADS 1000000
#define BINS 4

// reverse the lowest `bits` bits of w; used to generate a scattered input pattern
int bit_reverse(int w, int bits)
{
    int r = 0;
    for (int i = 0; i < bits; i++)
    {
        int bit = (w >> i) & 1;
        r |= bit << (bits - i - 1);
    }
    return r;
}

// unsynchronized increment: threads that hit the same bin race,
// so the result is non-deterministic
__global__ void naive_histo(int *d_bins, const int *d_in, const int BINS_COUNT)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int myBin = myItem % BINS_COUNT;
    d_bins[myBin]++;
}

// atomicAdd serializes updates to each bin, giving a correct histogram
__global__ void simple_histo(int *d_bins, const int *d_in, const int BINS_COUNT)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int myBin = myItem % BINS_COUNT;
    atomicAdd(&d_bins[myBin], 1);
}

int main(int argc, char **argv)
{
    // Array size can be considered as the number of measurements taken
    const int ARRAY_SIZE = 65536;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);

    const int BIN_COUNT = 16;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);

    // Generate the input array on the host (65536 numbers with a scattered bit pattern)
    int h_histo[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        h_histo[i] = bit_reverse(i, (int)log2((double)ARRAY_SIZE));
    }

    int h_bins[BIN_COUNT];
    for (int i = 0; i < BIN_COUNT; i++)
    {
        h_bins[i] = 0;
    }

    int *d_bins;
    hipMalloc((void**)&d_bins, BIN_BYTES);
    hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice);

    int *d_in;
    hipMalloc((void**)&d_in, ARRAY_BYTES);
    hipMemcpy(d_in, h_histo, ARRAY_BYTES, hipMemcpyHostToDevice);

    // Kernel call: one thread per input element (65536 = 256 * 256)
    hipLaunchKernelGGL(naive_histo, dim3(ARRAY_SIZE / 256), dim3(256), 0, 0, d_bins, d_in, BIN_COUNT);

    // Copy back data to host
    hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost);

    // Freeup gpu memory before exit
    hipFree(d_bins);
    hipFree(d_in);

    return 0;
}
5463a85239124114ae0fce6bdd80f05a90595dd0.cu
#include <stdio.h>
#include <math.h>

#define BLOCK_WIDTH 1000
#define TOTAL_THREADS 1000000
#define BINS 4

// reverse the lowest `bits` bits of w; used to generate a scattered input pattern
int bit_reverse(int w, int bits)
{
    int r = 0;
    for (int i = 0; i < bits; i++)
    {
        int bit = (w >> i) & 1;
        r |= bit << (bits - i - 1);
    }
    return r;
}

// unsynchronized increment: threads that hit the same bin race,
// so the result is non-deterministic
__global__ void naive_histo(int *d_bins, const int *d_in, const int BINS_COUNT)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int myBin = myItem % BINS_COUNT;
    d_bins[myBin]++;
}

// atomicAdd serializes updates to each bin, giving a correct histogram
__global__ void simple_histo(int *d_bins, const int *d_in, const int BINS_COUNT)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int myBin = myItem % BINS_COUNT;
    atomicAdd(&d_bins[myBin], 1);
}

int main(int argc, char **argv)
{
    // Array size can be considered as the number of measurements taken
    const int ARRAY_SIZE = 65536;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);

    const int BIN_COUNT = 16;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);

    // Generate the input array on the host (65536 numbers with a scattered bit pattern)
    int h_histo[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        h_histo[i] = bit_reverse(i, (int)log2((double)ARRAY_SIZE));
    }

    int h_bins[BIN_COUNT];
    for (int i = 0; i < BIN_COUNT; i++)
    {
        h_bins[i] = 0;
    }

    int *d_bins;
    cudaMalloc((void**)&d_bins, BIN_BYTES);
    cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);

    int *d_in;
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMemcpy(d_in, h_histo, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // Kernel call: one thread per input element (65536 = 256 * 256)
    naive_histo<<<ARRAY_SIZE / 256, 256>>>(d_bins, d_in, BIN_COUNT);

    // Copy back data to host
    cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);

    // Freeup gpu memory before exit
    cudaFree(d_bins);
    cudaFree(d_in);

    return 0;
}
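A quick way to validate the device result in either version of the file above is a sequential histogram; the helper below is an illustrative sketch (not part of the original source) that applies the same % BIN_COUNT binning rule. Only simple_histo is expected to match it exactly, since naive_histo increments bins without synchronization.

// Sequential reference using the same binning rule as the kernels above
void histo_cpu(const int *in, int n, int *bins, int bin_count)
{
    for (int b = 0; b < bin_count; b++) bins[b] = 0;
    for (int i = 0; i < n; i++) bins[in[i] % bin_count]++;
}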
22d700c57e89310b2bd215b161626bb117c3187c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

extern "C" {
    __global__ void smvp(double *A_data, int *A_indices, int *A_pointers, double *B, double *C, int *m, int *n, int *count, double *alpha){
        int ROW = blockIdx.x*blockDim.x+threadIdx.x;

        if(ROW < *m){
            int start = A_pointers[ROW];
            // for the last row the nonzero count marks the end of the row
            int end = (ROW == *m-1) ? (*count) : A_pointers[ROW+1];

            double sum = 0;
            for(int i = start; i < end; i++) {
                int index = A_indices[i];
                sum += (*alpha) * A_data[i] * B[index];
            }
            C[ROW] = sum;
        }
    }
}
22d700c57e89310b2bd215b161626bb117c3187c.cu
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

extern "C" {
    __global__ void smvp(double *A_data, int *A_indices, int *A_pointers, double *B, double *C, int *m, int *n, int *count, double *alpha){
        int ROW = blockIdx.x*blockDim.x+threadIdx.x;

        if(ROW < *m){
            int start = A_pointers[ROW];
            // for the last row the nonzero count marks the end of the row
            int end = (ROW == *m-1) ? (*count) : A_pointers[ROW+1];

            double sum = 0;
            for(int i = start; i < end; i++) {
                int index = A_indices[i];
                sum += (*alpha) * A_data[i] * B[index];
            }
            C[ROW] = sum;
        }
    }
}
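The smvp kernel above maps one thread to one matrix row and takes every scalar (m, n, count, alpha) as a device pointer. A host-side launch might look like the hypothetical helper below; all buffer names are placeholders, the scalars are assumed to already be copied to device memory, and the CUDA launch syntax is shown.

#include <cuda_runtime.h>

// Hypothetical launcher: every pointer argument is assumed to already reside in
// device memory, matching the smvp signature above.
void launch_smvp(double *dA_data, int *dA_indices, int *dA_pointers,
                 double *dB, double *dC,
                 int *d_m, int *d_n, int *d_count, double *d_alpha,
                 int m_host /* number of rows, known on the host */)
{
    const int threads = 256;
    const int blocks = (m_host + threads - 1) / threads; // one thread per matrix row
    smvp<<<blocks, threads>>>(dA_data, dA_indices, dA_pointers,
                              dB, dC, d_m, d_n, d_count, d_alpha);
    cudaDeviceSynchronize();
}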
67ca1d1b293b12dbaa3a77ad45892815d3a969c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "common_magma.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { magmaDoubleComplex *dAT; int n, lda, j0, npivots; int ipiv[MAX_PIVOTS]; } zlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswp_kernel( zlaswp_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < params.n ) { int lda = params.lda; magmaDoubleComplex *dAT = params.dAT + tid + params.j0*lda; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dAT + i2*lda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += lda; // A1 = dA + i1*ldx } } } // Launch zlaswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void zlaswp_launch( zlaswp_params_t &params ) { int blocks = (params.n + NTHREADS - 1) / NTHREADS; hipLaunchKernelGGL(( zlaswp_kernel), dim3(blocks), dim3(NTHREADS), 0, magma_stream , params ); } // Swap rows of A, stored row-wise. // This version updates each entry of ipiv by adding ind. // It is used in zgetrf, zgetrf_gpu, zgetrf_mgpu, zgetrf_ooc. extern "C" void magmablas_zpermute_long2( magma_int_t n, magmaDoubleComplex *dAT, magma_int_t lda, magma_int_t *ipiv, magma_int_t nb, magma_int_t ind ) { for( int k = 0; k < nb; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, nb-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT, n, lda, ind + k, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[ind + k + j] - k - 1; ipiv[ind + k + j] += ind; } zlaswp_launch( params ); } } // Swap rows of A, stored row-wise. // This version assumes ind has already been added to ipiv. // It is used in zgetrf_mgpu, zgetrf_ooc. extern "C" void magmablas_zpermute_long3( magmaDoubleComplex *dAT, magma_int_t lda, const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind ) { for( int k = 0; k < nb; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, nb-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT, lda, lda, ind + k, npivots }; for( int j = 0; j < MAX_PIVOTS; ++j ) { params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind; } zlaswp_launch( params ); } } // Swap rows of A, stored row-wise. // This interface is identical to LAPACK's laswp interface. // It is used in zgessm, zgetrf_incpiv. 
extern "C" void magmablas_zlaswp( magma_int_t n, magmaDoubleComplex *dAT, magma_int_t lda, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci ) { for( int k = i1-1; k < i2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, i2-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT+k*lda, n, lda, 0, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswp_launch( params ); } } // ------------------------------------------------------------ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. typedef struct { magmaDoubleComplex *dA; int n, ldx, ldy, j0, npivots; int ipiv[MAX_PIVOTS]; } zlaswpx_params_t; // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswpx_kernel( zlaswpx_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < params.n ) { int ldx = params.ldx; magmaDoubleComplex *dA = params.dA + tid*params.ldy + params.j0*ldx; magmaDoubleComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dA + i2*ldx; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } // Launch zlaswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void zlaswpx( zlaswpx_params_t &params ) { int blocks = (params.n + NTHREADS - 1) / NTHREADS; hipLaunchKernelGGL(( zlaswpx_kernel), dim3(blocks), dim3(NTHREADS), 0, magma_stream , params ); } // Swap rows of A. // For A stored row-wise, set ldx=lda and ldy=1. // For A stored column-wise, set ldx=1 and ldy=lda. // Otherwise, this interface is identical to LAPACK's laswp interface. extern "C" void magmablas_zlaswpx( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldx, magma_int_t ldy, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci ) { for( int k = i1-1; k < i2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, i2-k ); // fields are: dA n ldx ldy j0 npivots zlaswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswpx( params ); } } // ------------------------------------------------------------ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_zlaswp // (including copying pivots to the GPU). __global__ void zlaswp2_kernel( int n, magmaDoubleComplex *dAT, int lda, int npivots, const magma_int_t* d_ipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1] - 1; // Fortran index magmaDoubleComplex *A2 = dAT + i2*lda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += lda; // A1 = dA + i1*ldx } } } // Swap rows of A, stored row-wise. // d_ipiv is vector of pivots stored on the GPU, // unlike magmablas_zlaswp where ipiv is stored on the CPU. // This interface is identical to LAPACK's laswp interface. 
extern "C" void magmablas_zlaswp2( magma_int_t n, magmaDoubleComplex* dAT, magma_int_t lda, magma_int_t i1, magma_int_t i2, const magma_int_t *d_ipiv ) { int blocks = (n + NTHREADS - 1) / NTHREADS; hipLaunchKernelGGL(( zlaswp2_kernel), dim3(blocks), dim3(NTHREADS), 0, magma_stream , n, dAT + (i1-1)*lda, lda, i2-(i1-1), d_ipiv ); }
67ca1d1b293b12dbaa3a77ad45892815d3a969c1.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "common_magma.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { magmaDoubleComplex *dAT; int n, lda, j0, npivots; int ipiv[MAX_PIVOTS]; } zlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswp_kernel( zlaswp_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < params.n ) { int lda = params.lda; magmaDoubleComplex *dAT = params.dAT + tid + params.j0*lda; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dAT + i2*lda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += lda; // A1 = dA + i1*ldx } } } // Launch zlaswp kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void zlaswp_launch( zlaswp_params_t &params ) { int blocks = (params.n + NTHREADS - 1) / NTHREADS; zlaswp_kernel<<< blocks, NTHREADS, 0, magma_stream >>>( params ); } // Swap rows of A, stored row-wise. // This version updates each entry of ipiv by adding ind. // It is used in zgetrf, zgetrf_gpu, zgetrf_mgpu, zgetrf_ooc. extern "C" void magmablas_zpermute_long2( magma_int_t n, magmaDoubleComplex *dAT, magma_int_t lda, magma_int_t *ipiv, magma_int_t nb, magma_int_t ind ) { for( int k = 0; k < nb; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, nb-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT, n, lda, ind + k, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[ind + k + j] - k - 1; ipiv[ind + k + j] += ind; } zlaswp_launch( params ); } } // Swap rows of A, stored row-wise. // This version assumes ind has already been added to ipiv. // It is used in zgetrf_mgpu, zgetrf_ooc. extern "C" void magmablas_zpermute_long3( magmaDoubleComplex *dAT, magma_int_t lda, const magma_int_t *ipiv, magma_int_t nb, magma_int_t ind ) { for( int k = 0; k < nb; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, nb-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT, lda, lda, ind + k, npivots }; for( int j = 0; j < MAX_PIVOTS; ++j ) { params.ipiv[j] = ipiv[ind + k + j] - k - 1 - ind; } zlaswp_launch( params ); } } // Swap rows of A, stored row-wise. // This interface is identical to LAPACK's laswp interface. // It is used in zgessm, zgetrf_incpiv. 
extern "C" void magmablas_zlaswp( magma_int_t n, magmaDoubleComplex *dAT, magma_int_t lda, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci ) { for( int k = i1-1; k < i2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, i2-k ); // fields are: dAT n lda j0 npivots zlaswp_params_t params = { dAT+k*lda, n, lda, 0, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswp_launch( params ); } } // ------------------------------------------------------------ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. typedef struct { magmaDoubleComplex *dA; int n, ldx, ldy, j0, npivots; int ipiv[MAX_PIVOTS]; } zlaswpx_params_t; // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void zlaswpx_kernel( zlaswpx_params_t params ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < params.n ) { int ldx = params.ldx; magmaDoubleComplex *dA = params.dA + tid*params.ldy + params.j0*ldx; magmaDoubleComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaDoubleComplex *A2 = dA + i2*ldx; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } // Launch zlaswpx kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each. extern "C" void zlaswpx( zlaswpx_params_t &params ) { int blocks = (params.n + NTHREADS - 1) / NTHREADS; zlaswpx_kernel<<< blocks, NTHREADS, 0, magma_stream >>>( params ); } // Swap rows of A. // For A stored row-wise, set ldx=lda and ldy=1. // For A stored column-wise, set ldx=1 and ldy=lda. // Otherwise, this interface is identical to LAPACK's laswp interface. extern "C" void magmablas_zlaswpx( magma_int_t n, magmaDoubleComplex *dA, magma_int_t ldx, magma_int_t ldy, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci ) { for( int k = i1-1; k < i2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, i2-k ); // fields are: dA n ldx ldy j0 npivots zlaswpx_params_t params = { dA+k*ldx, n, ldx, ldy, 0, npivots }; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } zlaswpx( params ); } } // ------------------------------------------------------------ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_zlaswp // (including copying pivots to the GPU). __global__ void zlaswp2_kernel( int n, magmaDoubleComplex *dAT, int lda, int npivots, const magma_int_t* d_ipiv ) { unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x; if( tid < n ) { dAT += tid; magmaDoubleComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1] - 1; // Fortran index magmaDoubleComplex *A2 = dAT + i2*lda; magmaDoubleComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += lda; // A1 = dA + i1*ldx } } } // Swap rows of A, stored row-wise. // d_ipiv is vector of pivots stored on the GPU, // unlike magmablas_zlaswp where ipiv is stored on the CPU. // This interface is identical to LAPACK's laswp interface. 
extern "C" void magmablas_zlaswp2( magma_int_t n, magmaDoubleComplex* dAT, magma_int_t lda, magma_int_t i1, magma_int_t i2, const magma_int_t *d_ipiv ) { int blocks = (n + NTHREADS - 1) / NTHREADS; zlaswp2_kernel<<< blocks, NTHREADS, 0, magma_stream >>>( n, dAT + (i1-1)*lda, lda, i2-(i1-1), d_ipiv ); }
8d0874f68cc8f52475da6c99391606ae61db0cbb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmergecg.cu normal z -> c, Tue Feb 9 16:05:43 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_ccgreduce_kernel_spmv1( int Gs, int n, magmaFloatComplex * vtmp, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_C_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated reduction for two vectors __global__ void magma_ccgreduce_kernel_spmv2( int Gs, int n, magmaFloatComplex * vtmp, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_C_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_C_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_ccgmerge_spmvcsr_kernel( int n, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ) { magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ 
Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_ccgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ n * k + i ]; magmaFloatComplex val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_ccgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ num_cols_per_row * i + k ]; magmaFloatComplex val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 
32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELL alignment 1 and the first step of the reduction __global__ void magma_ccgmerge_spmvell_kernelb1( int n, int blocksize, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); int idx = threadIdx.x; // local row int bdx = blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < n ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; magmaFloatComplex val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * d [ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } /* if(i < n ) { int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < border; k++){ int col = dcolind [ offset+ blocksize * k + threadIdx.x ]; magmaFloatComplex val = dval[offset+ blocksize * k + threadIdx.x]; if( val != 0){ dot += val*d[col]; } } //magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); //for ( int k = 0; k < num_cols_per_row; k++ ) { // int col = dcolind [ n * k + i ]; // magmaFloatComplex val = dval [ n * k + i ]; // if( val != 0) // dot += val * d[ col ]; //} z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; }*/ __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_8( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * 
blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ) { shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_16( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ) { shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_32( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ) { shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = 
(shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_ccgmerge_spmvellpackrt_kernel2( int n, magmaFloatComplex * z, magmaFloatComplex * d, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_ccgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; magmaFloatComplex val = dval[offset+ blocksize * n + Idx]; if( val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper 
by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_ccg_rhokernel( magmaFloatComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaFloatComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_c_matrix input matrix @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dd magmaFloatComplex_ptr input vector d @param[out] dz magmaFloatComplex_ptr input vector z @param[out] skp magmaFloatComplex_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_spmv1( magma_c_matrix A, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_ccgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) hipLaunchKernelGGL(( magma_ccgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_ccgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_CUCSR ) { hipsparseHandle_t cusparseHandle = 0; hipsparseMatDescr_t descr = 0; magmaFloatComplex c_one = MAGMA_C_ONE; magmaFloatComplex c_zero = MAGMA_C_ZERO; hipsparseCreate( &cusparseHandle ); hipsparseSetStream( cusparseHandle, queue->cuda_stream() ); hipsparseCreateMatDescr( &descr ); hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL ); hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO ); hipsparseCcsrmv( cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, A.num_rows, A.num_cols, A.nnz, &c_one, descr, A.dval, A.drow, A.dcol, dd, &c_zero, dz ); hipsparseDestroyMatDescr( descr ); hipsparseDestroy( cusparseHandle ); cusparseHandle = 0; descr = 0; hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) { hipLaunchKernelGGL(( magma_ccgmerge_spmvell_kernelb1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, A.blocksize, A.dval, A.dcol, A.drow, dd, dz, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment > 1) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = int( sqrt( float( A.numblocks ))); int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 ); dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaFloatComplex ); if ( A.alignment == 8) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as 
the SpMV grid would result in low occupancy. hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = magma_ceildiv( A.num_rows, A.blocksize ); int num_threads = A.alignment*A.blocksize; int real_row_length = magma_roundup( A.max_nnz_row, A.alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = int( sqrt( float( num_blocks ))); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", int(A.alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+4, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_rhokernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_ccgmerge_xrbeta_kernel( int n, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * skp, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_ccg_alphabetakernel( magmaFloatComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaFloatComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_C_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_ccg_d_kernel( int n, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex alpha = skp[0]; if( i<n ) { d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in,out] dx magmaFloatComplex_ptr input vector x @param[in,out] dr magmaFloatComplex_ptr input/output vector r @param[in] dd magmaFloatComplex_ptr input vector d @param[in] dz 
magmaFloatComplex_ptr input vector z @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_xrbeta( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_ccgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dr, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r __global__ void magma_cpcgmerge_xrbeta_kernel( int n, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * skp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; } } // dot product for multiple vectors __global__ void magma_cmcdotc_one_kernel_1( int n, magmaFloatComplex * v0, magmaFloatComplex * w0, magmaFloatComplex * vtmp) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_C_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? 
v0[ i ] * v0[ i ] : MAGMA_C_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; vtmp[ blockIdx.x+n ] = temp[ blockDim.x ]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in,out] dx magmaFloatComplex_ptr input vector x @param[in,out] dr magmaFloatComplex_ptr input/output vector r @param[in] dd magmaFloatComplex_ptr input vector d @param[in] dz magmaFloatComplex_ptr input vector z @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cpcgmerge_xrbeta1( magma_int_t n, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); hipLaunchKernelGGL(( magma_cpcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream(), n, dx, dr, dd, dz, skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dh magmaFloatComplex_ptr input vector x @param[in] dr magmaFloatComplex_ptr input/output vector r @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cpcgmerge_xrbeta2( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dh, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_cmcdotc_one_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, dr, dh, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); magma_ccopyvector( 1, aux1+n, 1, skp+6, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r __global__ void magma_cjcgmerge_xrbeta_kernel( int n, magmaFloatComplex * diag, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * h, magmaFloatComplex * vtmp, magmaFloatComplex * skp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; h[i] = r[i] * diag[i]; } __syncthreads(); temp[ Idx ] = ( i < n ) ? h[ i ] * r[ i ] : MAGMA_C_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? 
r[ i ] * r[ i ] : MAGMA_C_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; vtmp[ blockIdx.x+n ] = temp[ blockDim.x ]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dh magmaFloatComplex_ptr input vector x @param[in] dr magmaFloatComplex_ptr input/output vector r @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cjcgmerge_xrbeta( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr diag, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr dh, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_cjcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , n, diag, dx, dr, dd, dz, dh, d1, skp ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); magma_ccopyvector( 1, aux1+n, 1, skp+6, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
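/* --- Illustrative addition, not part of the original MAGMA source: a minimal
   host-side sketch of how two of the merged routines defined in this file
   (magma_ccgmerge_spmv1 and magma_ccgmerge_xrbeta) combine into one CG iteration.
   The buffer layout is read off the kernels above and is an assumption of this
   sketch: skp[1] must hold r'r on entry, d1/d2 are reduction workspaces with at
   least one partial sum per 256-thread block (ceil(n/256) entries), and A, dx, dr,
   dd, dz are set up by the caller. */
extern "C" magma_int_t
example_ccgmerge_cg_iteration(
    magma_c_matrix A,
    magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr,
    magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    // z = A*d and skp[4] = d'z; afterwards skp[3] = rho = skp[1]/skp[4] and skp[2] = skp[1]
    magma_int_t info = magma_ccgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
    if ( info != MAGMA_SUCCESS )
        return info;
    // x += rho*d, r -= rho*z, skp[1] = r'r, skp[0] = alpha = skp[1]/skp[2], d = r + alpha*d
    return magma_ccgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
}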
8d0874f68cc8f52475da6c99391606ae61db0cbb.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from sparse-iter/blas/zmergecg.cu normal z -> c, Tue Feb 9 16:05:43 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_c // These routines merge multiple kernels from cmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_ccgreduce_kernel_spmv1( int Gs, int n, magmaFloatComplex * vtmp, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_C_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated reduction for two vectors __global__ void magma_ccgreduce_kernel_spmv2( int Gs, int n, magmaFloatComplex * vtmp, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_C_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_C_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_ccgmerge_spmvcsr_kernel( int n, magmaFloatComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ) { magmaFloatComplex dot = MAGMA_C_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ 
Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_ccgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ n * k + i ]; magmaFloatComplex val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_ccgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ num_cols_per_row * i + k ]; magmaFloatComplex val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 
32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELL alignment 1 and the first step of the reduction __global__ void magma_ccgmerge_spmvell_kernelb1( int n, int blocksize, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); int idx = threadIdx.x; // local row int bdx = blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < n ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; magmaFloatComplex val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * d [ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } /* if(i < n ) { int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int k = 0; k < border; k++){ int col = dcolind [ offset+ blocksize * k + threadIdx.x ]; magmaFloatComplex val = dval[offset+ blocksize * k + threadIdx.x]; if( val != 0){ dot += val*d[col]; } } //magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); //for ( int k = 0; k < num_cols_per_row; k++ ) { // int col = dcolind [ n * k + i ]; // magmaFloatComplex val = dval [ n * k + i ]; // if( val != 0) // dot += val * d[ col ]; //} z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; }*/ __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_8( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * 
blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ) { shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_16( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ) { shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_ccgmerge_spmvellpackrt_kernel_32( int n, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < n ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ) { shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = 
(shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_ccgmerge_spmvellpackrt_kernel2( int n, magmaFloatComplex * z, magmaFloatComplex * d, magmaFloatComplex * vtmp2 ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_ccgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * vtmp) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if(i < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; magmaFloatComplex val = dval[offset+ blocksize * n + Idx]; if( val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper 
by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_ccgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaFloatComplex * d, magmaFloatComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaFloatComplex shared[]; if(row < num_rows ) { magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaFloatComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_ccg_rhokernel( magmaFloatComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaFloatComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_c_matrix input matrix @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dd magmaFloatComplex_ptr input vector d @param[out] dz magmaFloatComplex_ptr input vector z @param[out] skp magmaFloatComplex_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_spmv1( magma_c_matrix A, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) magma_ccgmerge_spmvcsr_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) magma_ccgmerge_spmvellpack_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) magma_ccgmerge_spmvell_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_CUCSR ) { cusparseHandle_t cusparseHandle = 0; cusparseMatDescr_t descr = 0; magmaFloatComplex c_one = MAGMA_C_ONE; magmaFloatComplex c_zero = MAGMA_C_ZERO; cusparseCreate( &cusparseHandle ); cusparseSetStream( cusparseHandle, queue->cuda_stream() ); cusparseCreateMatDescr( &descr ); cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL ); cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO ); cusparseCcsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, A.num_rows, A.num_cols, A.nnz, &c_one, descr, A.dval, A.drow, A.dcol, dd, &c_zero, dz ); cusparseDestroyMatDescr( descr ); cusparseDestroy( cusparseHandle ); cusparseHandle = 0; descr = 0; magma_ccgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) { magma_ccgmerge_spmvell_kernelb1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, A.blocksize, A.dval, A.dcol, A.drow, dd, dz, d1 ); } else if ( A.storage_type == Magma_SELLP && A.alignment > 1) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = int( sqrt( float( A.numblocks ))); int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 ); dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaFloatComplex ); if ( A.alignment == 8) magma_ccgmerge_spmvsellpt_kernel_8 <<< gridsellp, block, Mssellp, queue->cuda_stream() >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) magma_ccgmerge_spmvsellpt_kernel_16 <<< gridsellp, block, Mssellp, queue->cuda_stream() >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) magma_ccgmerge_spmvsellpt_kernel_32 <<< gridsellp, block, Mssellp, queue->cuda_stream() >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_ccgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = magma_ceildiv( A.num_rows, A.blocksize ); int num_threads = A.alignment*A.blocksize; int real_row_length = magma_roundup( A.max_nnz_row, A.alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = int( sqrt( float( num_blocks ))); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { magma_ccgmerge_spmvellpackrt_kernel_32 <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { magma_ccgmerge_spmvellpackrt_kernel_16 <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { magma_ccgmerge_spmvellpackrt_kernel_8 <<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", int(A.alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_ccgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+4, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_rhokernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_ccgmerge_xrbeta_kernel( int n, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * skp, magmaFloatComplex * vtmp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0); if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_ccg_alphabetakernel( magmaFloatComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaFloatComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_C_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_ccg_d_kernel( int n, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex alpha = skp[0]; if( i<n ) { d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in,out] dx magmaFloatComplex_ptr input vector x @param[in,out] dr magmaFloatComplex_ptr input/output vector r @param[in] dd magmaFloatComplex_ptr input vector d @param[in] dz magmaFloatComplex_ptr input vector z @param[in] skp magmaFloatComplex_ptr array for parameters 
@param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_ccgmerge_xrbeta( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_ccgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); magma_ccg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dr, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r __global__ void magma_cpcgmerge_xrbeta_kernel( int n, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * skp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; } } // dot product for multiple vectors __global__ void magma_cmcdotc_one_kernel_1( int n, magmaFloatComplex * v0, magmaFloatComplex * w0, magmaFloatComplex * vtmp) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_C_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? 
v0[ i ] * v0[ i ] : MAGMA_C_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; vtmp[ blockIdx.x+n ] = temp[ blockDim.x ]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in,out] dx magmaFloatComplex_ptr input vector x @param[in,out] dr magmaFloatComplex_ptr input/output vector r @param[in] dd magmaFloatComplex_ptr input vector d @param[in] dz magmaFloatComplex_ptr input vector z @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cpcgmerge_xrbeta1( magma_int_t n, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); magma_cpcgmerge_xrbeta_kernel<<< Gs, Bs, 0, queue->cuda_stream()>>> ( n, dx, dr, dd, dz, skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dh magmaFloatComplex_ptr input vector x @param[in] dr magmaFloatComplex_ptr input/output vector r @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cpcgmerge_xrbeta2( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr dh, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_cmcdotc_one_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, dr, dh, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); magma_ccopyvector( 1, aux1+n, 1, skp+6, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); magma_ccg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r __global__ void magma_cjcgmerge_xrbeta_kernel( int n, magmaFloatComplex * diag, magmaFloatComplex * x, magmaFloatComplex * r, magmaFloatComplex * d, magmaFloatComplex * z, magmaFloatComplex * h, magmaFloatComplex * vtmp, magmaFloatComplex * skp ) { extern __shared__ magmaFloatComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; magmaFloatComplex rho = skp[3]; magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho; if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; h[i] = r[i] * diag[i]; } __syncthreads(); temp[ Idx ] = ( i < n ) ? h[ i ] * r[ i ] : MAGMA_C_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? 
r[ i ] * r[ i ] : MAGMA_C_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; vtmp[ blockIdx.x+n ] = temp[ blockDim.x ]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaFloatComplex_ptr temporary vector @param[in] d2 magmaFloatComplex_ptr temporary vector @param[in] dh magmaFloatComplex_ptr input vector x @param[in] dr magmaFloatComplex_ptr input/output vector r @param[in] skp magmaFloatComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_csygpuk ********************************************************************/ extern "C" magma_int_t magma_cjcgmerge_xrbeta( magma_int_t n, magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2, magmaFloatComplex_ptr diag, magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dr, magmaFloatComplex_ptr dd, magmaFloatComplex_ptr dz, magmaFloatComplex_ptr dh, magmaFloatComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4*local_block_size * sizeof( magmaFloatComplex ); magmaFloatComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_cjcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( n, diag, dx, dr, dd, dz, dh, d1, skp ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_ccgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_ccopyvector( 1, aux1, 1, skp+1, 1, queue ); magma_ccopyvector( 1, aux1+n, 1, skp+6, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_ccg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); magma_ccg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
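/* --- Illustrative note, not part of the original file: the .hip version of this
   file above differs from this CUDA version essentially only in the kernel-launch
   syntax that hipify rewrites. For example, the triple-chevron launch used here,

       magma_ccg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );

   appears in the .hip file as the portable macro form

       hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0,
                          queue->cuda_stream(), n, skp, dh, dd );

   The grid and block arguments are wrapped in dim3(...), while the shared-memory
   size and stream keep their positions ahead of the kernel arguments. */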
df22fce5dc48c18472fd1b69860e6baaf01d6806.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sga_up_forward.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const float *filters = NULL; hipMalloc(&filters, XSIZE*YSIZE); const int height = 1; const int width = 1; const int depth = 1; const int wsize = 1; float *top_data = NULL; hipMalloc(&top_data, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sga_up_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,filters,height,width,depth,wsize,top_data); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sga_up_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,filters,height,width,depth,wsize,top_data); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sga_up_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, n,filters,height,width,depth,wsize,top_data); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
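/* --- Illustrative addition, not part of the original file: the timing loop above
   measures steady_clock time around 1000 asynchronous launches without a
   synchronization before the end timestamp, so it may reflect enqueue cost rather
   than kernel execution time. Below is a sketch of an event-based variant; the
   helper name, its parameters, and the assumption that the kernel arguments mirror
   those in main() are all hypothetical. */
static float time_sga_up_forward_ms(dim3 gridBlock, dim3 threadBlock,
                                    int n, const float* filters,
                                    int height, int width, int depth, int wsize,
                                    float* top_data, int iters) {
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);                  // mark the start on the null stream
  for (int i = 0; i < iters; ++i) {
    hipLaunchKernelGGL(sga_up_forward, gridBlock, threadBlock, 0, 0,
                       n, filters, height, width, depth, wsize, top_data);
  }
  hipEventRecord(stop, 0);                   // mark the end after all launches
  hipEventSynchronize(stop);                 // wait until the queued kernels finish
  float ms = 0.0f;
  hipEventElapsedTime(&ms, start, stop);     // elapsed GPU time in milliseconds
  hipEventDestroy(start);
  hipEventDestroy(stop);
  return ms;
}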
df22fce5dc48c18472fd1b69860e6baaf01d6806.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sga_up_forward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const float *filters = NULL; cudaMalloc(&filters, XSIZE*YSIZE); const int height = 1; const int width = 1; const int depth = 1; const int wsize = 1; float *top_data = NULL; cudaMalloc(&top_data, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sga_up_forward<<<gridBlock,threadBlock>>>(n,filters,height,width,depth,wsize,top_data); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sga_up_forward<<<gridBlock,threadBlock>>>(n,filters,height,width,depth,wsize,top_data); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sga_up_forward<<<gridBlock,threadBlock>>>(n,filters,height,width,depth,wsize,top_data); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
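/* --- Illustrative addition, not part of the original file: CUDA-event counterpart
   of the HIP timing sketch appended to the .hip version above, for the same reason
   (the steady_clock loop does not synchronize before its end timestamp). The helper
   and its parameters are hypothetical. */
static float time_sga_up_forward_ms(dim3 gridBlock, dim3 threadBlock,
                                    int n, const float* filters,
                                    int height, int width, int depth, int wsize,
                                    float* top_data, int iters) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);                 // mark the start on the default stream
  for (int i = 0; i < iters; ++i) {
    sga_up_forward<<<gridBlock, threadBlock>>>(n, filters, height, width, depth,
                                               wsize, top_data);
  }
  cudaEventRecord(stop, 0);                  // mark the end after all launches
  cudaEventSynchronize(stop);                // wait until the queued kernels finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}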
6a3837238d172757704982935c84105f966776d1.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <cstdlib>

#define ARR_I_J(arr, i, j) arr[(i) * (K) + (j)]
#define ARR_I_J_W(arr, i, j, W) arr[(i) * (L) + (j)]
//#define N_DEBUG

struct PathNode {
    int prevTimestamp;
    int prevQueryIdx;
    int videoId;
    float score;
public:
    PathNode() : prevTimestamp(-1), prevQueryIdx(-1), videoId(-1), score(0) { }
    PathNode(int _videoId, float _score) : prevTimestamp(-1), prevQueryIdx(-1), videoId(_videoId), score(_score) { }
};

__global__ void detect_cuda_vwii(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN,
                                 const int L, const int K, int tmp_wnd, const int offset,
                                 int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int s_idx = tid * offset;
    int e_idx = s_idx + offset;
    if (e_idx > K)
        e_idx = K;
#ifdef N_DEBUG
    printf("[tid:%d] %d ~ %d(%d)\n", tid, s_idx, e_idx, offset);
#endif
    if (tid >= K) {
        return;
    }
    for (int i = 1; i < L; i++) {
        for (int j = s_idx; j < e_idx; j++) {
            int curr_videoidx = ARR_I_J(d_video_idx, i, j);
            if (curr_videoidx == -1) {
                continue;
            }
            float curr_score = ARR_I_J(d_ref_score, i, j);
            int curr_timpstamp = ARR_I_J(d_ref_index, i, j);
            float max_score = curr_score;
            int prev_i = -1; //query idx
            int prev_j = -1; //ref timestamp
            int start_idx = i - tmp_wnd >= 0 ? i - tmp_wnd : 0;
#ifdef N_DEBUG
            //printf("start %d -> %d\n", start_idx, i);
#endif
            for (int l = start_idx; l < i; l++) {
                for (int k = 0; k < K; k++) {
                    int prev_videoidx = ARR_I_J(d_video_idx, l, k);
                    if (curr_videoidx != prev_videoidx) {
                        continue;
                    }
                    if (prev_videoidx == -1) {
                        continue;
                    }
                    float prev_score = ARR_I_J(d_PN, l, k).score; //prev path score
                    int prev_timestamp = ARR_I_J(d_ref_index, l, k);
                    if (prev_timestamp >= curr_timpstamp)
                        continue;
                    //if (prev_timestamp <= curr_timpstamp - tmp_wnd)
                    //continue;
                    if (max_score < prev_score + curr_score) {
                        //printf("updeted(prev score %f -> %f\n", max_score, prev_score + curr_score);
                        max_score = prev_score + curr_score;
                        prev_i = l;
                        prev_j = k;
                    }
                }
            }
            ARR_I_J(d_PN, i, j).prevQueryIdx = prev_i;
            ARR_I_J(d_PN, i, j).prevTimestamp = prev_j;
            ARR_I_J(d_PN, i, j).videoId = curr_videoidx;
            ARR_I_J(d_PN, i, j).score = max_score;
            if (d_res_score_list[curr_videoidx] <= max_score) {
                d_last_queryidx_list[curr_videoidx] = i;
                d_last_timestamp_list[curr_videoidx] = j;
                d_res_score_list[curr_videoidx] = max_score;
            }
        }
        __syncthreads();
    }
#ifdef N_DEBUG
    printf("==d_last_queryidx_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%d ", d_last_queryidx_list[j]);
    }
    printf("\n\n");
    printf("==d_last_timestamp_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%d ", d_last_timestamp_list[j]);
    }
    printf("\n\n");
    printf("==d_res_score_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%f ", d_res_score_list[j]);
    }
    printf("\n\n");
#endif
}

__global__ void update_result(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN,
                              const int L, const int K, const int video_num,
                              int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list,
                              const int offset,
                              int* d_res_q, int* d_res_p, float* d_res_scores, int* d_match ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int s_idx = tid * offset;
    int e_idx = s_idx + offset;
    if (e_idx > video_num)
        e_idx = video_num;
#ifdef N_DEBUG
    printf("[tid:%d]update_result %d ~ %d(%d) %d\n", tid, s_idx, e_idx, offset , video_num);
#endif
    if (tid >= K) {
        return;
    }
    for(int t = s_idx; t < e_idx; t++) {
        int last_timestamp = d_last_timestamp_list[t];
        int last_queryidx = d_last_queryidx_list[t];
        if (last_queryidx == -1)
            continue;
        //printf("[%d](%d %d),\n", t, last_queryidx, last_timestamp);
        int p_i = last_queryidx;
        int p_j = last_timestamp;
        int idx = 0;
        int match_cnt = 0;
        while(p_i != -1) {
            //printf("(%d %d)->", p_i, p_j);
            match_cnt++;
            ARR_I_J_W(d_res_scores, t, idx, L) = ARR_I_J(d_ref_score, p_i, p_j);
            ARR_I_J_W(d_res_q, t, idx, L) = p_i;
            ARR_I_J_W(d_res_p, t, idx++, L) = ARR_I_J(d_ref_index, p_i, p_j);
            int c_i = ARR_I_J(d_PN, p_i, p_j).prevQueryIdx;
            int c_j = ARR_I_J(d_PN, p_i, p_j).prevTimestamp;
            p_i = c_i;
            p_j = c_j;
        }
        //printf("\n");
        //reverse array
        for(int i = 0; i < match_cnt / 2; i++) {
            int temp = ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_q, t, i, video_num);
            ARR_I_J_W(d_res_q, t, i, video_num) = temp;
            temp = ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_p, t, i, video_num);
            ARR_I_J_W(d_res_p, t, i, video_num) = temp;
            float f_temp = ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_scores, t, i, video_num);
            ARR_I_J_W(d_res_scores, t, i, video_num) = f_temp;
        }
        d_match[t] = match_cnt;
    }
    __syncthreads();
}

#ifdef __cplusplus
extern "C" {
#endif

void call_kernel(int* _ref_index, float* _ref_score, int* _video_idx, int L, int K, float score_thr, int tmp_wnd, int video_num,
                 int n_block, int n_thread,
                 int* res_q, int* res_p, float* result_score_path, int* match, float* score) {
    //printf("call_kernel called! L : %d K : %d vn : %d\n",L, K, video_num);
    int* h_listidx_list = (int*)calloc(K, sizeof(int));
    int* h_queryidx_list = (int*)calloc(K, sizeof(int));
    float* h_maxscore_list = (float*)calloc(K, sizeof(float));
    memset(h_listidx_list, -1, K * sizeof(int));
    memset(h_queryidx_list, -1, K * sizeof(int));

    int* d_ref_index;
    float* d_ref_score;
    int* d_video_idx;
    PathNode* d_PN;
    int* d_lastidx_list;
    int* d_last_queryidx_list;
    float* d_maxscore_list;
    int* d_res_q;
    int* d_res_p;
    float* d_res_scores;
    int* d_match;

    hipMalloc((void**)&d_ref_index, L * K * sizeof(int));
    hipMalloc((void**)&d_ref_score, L * K * sizeof(float));
    hipMalloc((void**)&d_video_idx, L * K * sizeof(int));
    hipMalloc((void**)&d_PN, L * K * sizeof(PathNode));
    hipMalloc((void**)&d_lastidx_list, video_num * sizeof(int));
    hipMalloc((void**)&d_last_queryidx_list, video_num * sizeof(int));
    hipMalloc((void**)&d_maxscore_list, video_num * sizeof(float));
    hipMalloc((void**)&d_res_q, L * video_num * sizeof(int));
    hipMalloc((void**)&d_res_p, L * video_num * sizeof(int));
    hipMalloc((void**)&d_res_scores, L * video_num * sizeof(float));
    hipMalloc((void**)&d_match, video_num * sizeof(int));

    /*
     * set variables
     */
    PathNode* h_PN = new PathNode[L * K];
    //printf("update first row\n");
    for (int i = 0; i < K; i++) {
        ARR_I_J(h_PN, 0, i).videoId = ARR_I_J(_video_idx, 0, i);
        ARR_I_J(h_PN, 0, i).score = ARR_I_J(_ref_score, 0, i);
        //printf("%f ", ARR_I_J(_ref_score, 0, i));
    }
    //printf("\n");

#ifdef N_DEBUG
    printf("K : %d\n", K);
    printf("vidnum : %d\n", video_num);
    printf("====== ARR_I_J(_ref_score, i, j)\n");
    for (int i = 0 ; i < L; i++) {
        printf("[%d]\t", i);
        for (int j = 0 ; j < K; j++) {
            printf("%f ", ARR_I_J(_ref_score, i, j));
        }
        printf("\n");
    }
#endif

    hipMemcpy(d_lastidx_list, h_listidx_list, K * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_last_queryidx_list, h_queryidx_list, K * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_maxscore_list, h_maxscore_list, K * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_ref_index, _ref_index, L * K * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_ref_score, _ref_score, L * K * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_video_idx, _video_idx, L * K * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_PN, h_PN, L * K * sizeof(PathNode), hipMemcpyHostToDevice);
    hipMemcpy(d_res_q, res_q, L * video_num * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_res_p, res_p, L * video_num * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_res_scores, result_score_path, L * video_num * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_match, match, video_num * sizeof(int), hipMemcpyHostToDevice);

    int offset = K / (n_block * n_thread);
    if (offset && K % (n_block * n_thread) != 0)
        offset += 1;
    offset = offset ? offset : 1;

    hipDeviceSynchronize();
    hipLaunchKernelGGL(( detect_cuda_vwii), dim3(n_block), dim3(n_thread), 0, 0,
        d_ref_index, d_ref_score, d_video_idx, d_PN,
        L, K, tmp_wnd, offset,
        d_lastidx_list, d_last_queryidx_list, d_maxscore_list
    );
    hipDeviceSynchronize();

#ifdef N_DEBUG
    printf("=== ARR_I_J(h_PN, i, j).score\n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%f ", ARR_I_J(h_PN, i, j).score);
        }
        printf("\n");
    }
    printf("=====ARR_I_J(h_PN, i, j).prevTimestamp \n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%d ", ARR_I_J(h_PN, i, j).prevTimestamp);
        }
        printf("\n");
    }
    printf("=====ARR_I_J(h_PN, i, j).prevQueryIdx \n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%d ", ARR_I_J(h_PN, i, j).prevQueryIdx);
        }
        printf("\n");
    }
    printf("===match\n");
    for (int j = 0 ; j < video_num; j++) {
        printf("%d(%d) ", match[j], j);
    }
    printf("\n");
    printf("===res_p\n");
    for (int i = 0 ; i < video_num; i++) {
        for (int j = 0 ; j < match[i]; j++) {
            printf("%d ", ARR_I_J_W(res_p, i, j, video_num));
        }
        printf("\n");
    }
#endif

    offset = video_num / (n_block * n_thread);
    if (offset && video_num % (n_block * n_thread) != 0)
        offset += 1;
    offset = offset ? offset : 1;

    hipLaunchKernelGGL(( update_result), dim3(n_block), dim3(n_thread), 0, 0,
        d_ref_index, d_ref_score, d_video_idx, d_PN,
        L, K, video_num,
        d_lastidx_list, d_last_queryidx_list, d_maxscore_list,
        offset,
        d_res_q, d_res_p, d_res_scores, d_match
    );
    hipDeviceSynchronize();

    //hipMemcpy(_ref_index, d_ref_index, L * K * sizeof(int), hipMemcpyDeviceToHost);
    //hipMemcpy(_ref_score, d_ref_score, L * K * sizeof(float), hipMemcpyDeviceToHost);
    //hipMemcpy(_video_idx, d_video_idx, L * K * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(h_PN, d_PN, L * K * sizeof(PathNode), hipMemcpyDeviceToHost);
    hipMemcpy(res_q, d_res_q, L * video_num * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(res_p, d_res_p, L * video_num * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(result_score_path, d_res_scores, L * video_num * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(score, d_maxscore_list, video_num * sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(match, d_match, video_num * sizeof(int), hipMemcpyDeviceToHost);

    hipFree(d_ref_index);
    hipFree(d_ref_score);
    hipFree(d_video_idx);
    hipFree(d_PN);
    hipFree(d_lastidx_list);
    hipFree(d_last_queryidx_list);
    hipFree(d_maxscore_list);
    hipFree(d_res_q);
    hipFree(d_res_p);
    hipFree(d_res_scores);
    hipFree(d_match);
}

void foo(int* arr2d, int L, int K) {
    printf("fun called!\n");
    for(int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            ARR_I_J(arr2d, i, j) = 100;
            printf("%d ", ARR_I_J(arr2d, i, j));
        }
        printf("\n");
    }
}

#ifdef __cplusplus
}
#endif
6a3837238d172757704982935c84105f966776d1.cu
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <cstdlib>

#define ARR_I_J(arr, i, j) arr[(i) * (K) + (j)]
#define ARR_I_J_W(arr, i, j, W) arr[(i) * (L) + (j)]
//#define N_DEBUG

struct PathNode {
    int prevTimestamp;
    int prevQueryIdx;
    int videoId;
    float score;
public:
    PathNode() : prevTimestamp(-1), prevQueryIdx(-1), videoId(-1), score(0) { }
    PathNode(int _videoId, float _score) : prevTimestamp(-1), prevQueryIdx(-1), videoId(_videoId), score(_score) { }
};

__global__ void detect_cuda_vwii(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN,
                                 const int L, const int K, int tmp_wnd, const int offset,
                                 int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int s_idx = tid * offset;
    int e_idx = s_idx + offset;
    if (e_idx > K)
        e_idx = K;
#ifdef N_DEBUG
    printf("[tid:%d] %d ~ %d(%d)\n", tid, s_idx, e_idx, offset);
#endif
    if (tid >= K) {
        return;
    }
    for (int i = 1; i < L; i++) {
        for (int j = s_idx; j < e_idx; j++) {
            int curr_videoidx = ARR_I_J(d_video_idx, i, j);
            if (curr_videoidx == -1) {
                continue;
            }
            float curr_score = ARR_I_J(d_ref_score, i, j);
            int curr_timpstamp = ARR_I_J(d_ref_index, i, j);
            float max_score = curr_score;
            int prev_i = -1; //query idx
            int prev_j = -1; //ref timestamp
            int start_idx = i - tmp_wnd >= 0 ? i - tmp_wnd : 0;
#ifdef N_DEBUG
            //printf("start %d -> %d\n", start_idx, i);
#endif
            for (int l = start_idx; l < i; l++) {
                for (int k = 0; k < K; k++) {
                    int prev_videoidx = ARR_I_J(d_video_idx, l, k);
                    if (curr_videoidx != prev_videoidx) {
                        continue;
                    }
                    if (prev_videoidx == -1) {
                        continue;
                    }
                    float prev_score = ARR_I_J(d_PN, l, k).score; //score of the path up to prev
                    int prev_timestamp = ARR_I_J(d_ref_index, l, k);
                    if (prev_timestamp >= curr_timpstamp)
                        continue;
                    //if (prev_timestamp <= curr_timpstamp - tmp_wnd)
                    //continue;
                    if (max_score < prev_score + curr_score) {
                        //printf("updeted(prev score %f -> %f\n", max_score, prev_score + curr_score);
                        max_score = prev_score + curr_score;
                        prev_i = l;
                        prev_j = k;
                    }
                }
            }
            ARR_I_J(d_PN, i, j).prevQueryIdx = prev_i;
            ARR_I_J(d_PN, i, j).prevTimestamp = prev_j;
            ARR_I_J(d_PN, i, j).videoId = curr_videoidx;
            ARR_I_J(d_PN, i, j).score = max_score;
            if (d_res_score_list[curr_videoidx] <= max_score) {
                d_last_queryidx_list[curr_videoidx] = i;
                d_last_timestamp_list[curr_videoidx] = j;
                d_res_score_list[curr_videoidx] = max_score;
            }
        }
        __syncthreads();
    }
#ifdef N_DEBUG
    printf("==d_last_queryidx_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%d ", d_last_queryidx_list[j]);
    }
    printf("\n\n");
    printf("==d_last_timestamp_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%d ", d_last_timestamp_list[j]);
    }
    printf("\n\n");
    printf("==d_res_score_list\n");
    for (int j = 0 ; j < K; j++) {
        printf("%f ", d_res_score_list[j]);
    }
    printf("\n\n");
#endif
}

__global__ void update_result(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN,
                              const int L, const int K, const int video_num,
                              int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list,
                              const int offset,
                              int* d_res_q, int* d_res_p, float* d_res_scores, int* d_match ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int s_idx = tid * offset;
    int e_idx = s_idx + offset;
    if (e_idx > video_num)
        e_idx = video_num;
#ifdef N_DEBUG
    printf("[tid:%d]update_result %d ~ %d(%d) %d\n", tid, s_idx, e_idx, offset , video_num);
#endif
    if (tid >= K) {
        return;
    }
    for(int t = s_idx; t < e_idx; t++) {
        int last_timestamp = d_last_timestamp_list[t];
        int last_queryidx = d_last_queryidx_list[t];
        if (last_queryidx == -1)
            continue;
        //printf("[%d](%d %d),\n", t, last_queryidx, last_timestamp);
        int p_i = last_queryidx;
        int p_j = last_timestamp;
        int idx = 0;
        int match_cnt = 0;
        while(p_i != -1) {
            //printf("(%d %d)->", p_i, p_j);
            match_cnt++;
            ARR_I_J_W(d_res_scores, t, idx, L) = ARR_I_J(d_ref_score, p_i, p_j);
            ARR_I_J_W(d_res_q, t, idx, L) = p_i;
            ARR_I_J_W(d_res_p, t, idx++, L) = ARR_I_J(d_ref_index, p_i, p_j);
            int c_i = ARR_I_J(d_PN, p_i, p_j).prevQueryIdx;
            int c_j = ARR_I_J(d_PN, p_i, p_j).prevTimestamp;
            p_i = c_i;
            p_j = c_j;
        }
        //printf("\n");
        //reverse array
        for(int i = 0; i < match_cnt / 2; i++) {
            int temp = ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_q, t, i, video_num);
            ARR_I_J_W(d_res_q, t, i, video_num) = temp;
            temp = ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_p, t, i, video_num);
            ARR_I_J_W(d_res_p, t, i, video_num) = temp;
            float f_temp = ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num);
            ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_scores, t, i, video_num);
            ARR_I_J_W(d_res_scores, t, i, video_num) = f_temp;
        }
        d_match[t] = match_cnt;
    }
    __syncthreads();
}

#ifdef __cplusplus
extern "C" {
#endif

void call_kernel(int* _ref_index, float* _ref_score, int* _video_idx, int L, int K, float score_thr, int tmp_wnd, int video_num,
                 int n_block, int n_thread,
                 int* res_q, int* res_p, float* result_score_path, int* match, float* score) {
    //printf("call_kernel called! L : %d K : %d vn : %d\n",L, K, video_num);
    int* h_listidx_list = (int*)calloc(K, sizeof(int));
    int* h_queryidx_list = (int*)calloc(K, sizeof(int));
    float* h_maxscore_list = (float*)calloc(K, sizeof(float));
    memset(h_listidx_list, -1, K * sizeof(int));
    memset(h_queryidx_list, -1, K * sizeof(int));

    int* d_ref_index;
    float* d_ref_score;
    int* d_video_idx;
    PathNode* d_PN;
    int* d_lastidx_list;
    int* d_last_queryidx_list;
    float* d_maxscore_list;
    int* d_res_q;
    int* d_res_p;
    float* d_res_scores;
    int* d_match;

    cudaMalloc((void**)&d_ref_index, L * K * sizeof(int));
    cudaMalloc((void**)&d_ref_score, L * K * sizeof(float));
    cudaMalloc((void**)&d_video_idx, L * K * sizeof(int));
    cudaMalloc((void**)&d_PN, L * K * sizeof(PathNode));
    cudaMalloc((void**)&d_lastidx_list, video_num * sizeof(int));
    cudaMalloc((void**)&d_last_queryidx_list, video_num * sizeof(int));
    cudaMalloc((void**)&d_maxscore_list, video_num * sizeof(float));
    cudaMalloc((void**)&d_res_q, L * video_num * sizeof(int));
    cudaMalloc((void**)&d_res_p, L * video_num * sizeof(int));
    cudaMalloc((void**)&d_res_scores, L * video_num * sizeof(float));
    cudaMalloc((void**)&d_match, video_num * sizeof(int));

    /*
     * set variables
     */
    PathNode* h_PN = new PathNode[L * K];
    //printf("update first row\n");
    for (int i = 0; i < K; i++) {
        ARR_I_J(h_PN, 0, i).videoId = ARR_I_J(_video_idx, 0, i);
        ARR_I_J(h_PN, 0, i).score = ARR_I_J(_ref_score, 0, i);
        //printf("%f ", ARR_I_J(_ref_score, 0, i));
    }
    //printf("\n");

#ifdef N_DEBUG
    printf("K : %d\n", K);
    printf("vidnum : %d\n", video_num);
    printf("====== ARR_I_J(_ref_score, i, j)\n");
    for (int i = 0 ; i < L; i++) {
        printf("[%d]\t", i);
        for (int j = 0 ; j < K; j++) {
            printf("%f ", ARR_I_J(_ref_score, i, j));
        }
        printf("\n");
    }
#endif

    cudaMemcpy(d_lastidx_list, h_listidx_list, K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_last_queryidx_list, h_queryidx_list, K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_maxscore_list, h_maxscore_list, K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ref_index, _ref_index, L * K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ref_score, _ref_score, L * K * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_video_idx, _video_idx, L * K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_PN, h_PN, L * K * sizeof(PathNode), cudaMemcpyHostToDevice);
    cudaMemcpy(d_res_q, res_q, L * video_num * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_res_p, res_p, L * video_num * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_res_scores, result_score_path, L * video_num * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_match, match, video_num * sizeof(int), cudaMemcpyHostToDevice);

    int offset = K / (n_block * n_thread);
    if (offset && K % (n_block * n_thread) != 0)
        offset += 1;
    offset = offset ? offset : 1;

    cudaDeviceSynchronize();
    detect_cuda_vwii<<<n_block, n_thread>>>(
        d_ref_index, d_ref_score, d_video_idx, d_PN,
        L, K, tmp_wnd, offset,
        d_lastidx_list, d_last_queryidx_list, d_maxscore_list
    );
    cudaDeviceSynchronize();

#ifdef N_DEBUG
    printf("=== ARR_I_J(h_PN, i, j).score\n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%f ", ARR_I_J(h_PN, i, j).score);
        }
        printf("\n");
    }
    printf("=====ARR_I_J(h_PN, i, j).prevTimestamp \n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%d ", ARR_I_J(h_PN, i, j).prevTimestamp);
        }
        printf("\n");
    }
    printf("=====ARR_I_J(h_PN, i, j).prevQueryIdx \n");
    for (int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            printf("%d ", ARR_I_J(h_PN, i, j).prevQueryIdx);
        }
        printf("\n");
    }
    printf("===match\n");
    for (int j = 0 ; j < video_num; j++) {
        printf("%d(%d) ", match[j], j);
    }
    printf("\n");
    printf("===res_p\n");
    for (int i = 0 ; i < video_num; i++) {
        for (int j = 0 ; j < match[i]; j++) {
            printf("%d ", ARR_I_J_W(res_p, i, j, video_num));
        }
        printf("\n");
    }
#endif

    offset = video_num / (n_block * n_thread);
    if (offset && video_num % (n_block * n_thread) != 0)
        offset += 1;
    offset = offset ? offset : 1;

    update_result<<<n_block, n_thread>>>(
        d_ref_index, d_ref_score, d_video_idx, d_PN,
        L, K, video_num,
        d_lastidx_list, d_last_queryidx_list, d_maxscore_list,
        offset,
        d_res_q, d_res_p, d_res_scores, d_match
    );
    cudaDeviceSynchronize();

    //cudaMemcpy(_ref_index, d_ref_index, L * K * sizeof(int), cudaMemcpyDeviceToHost);
    //cudaMemcpy(_ref_score, d_ref_score, L * K * sizeof(float), cudaMemcpyDeviceToHost);
    //cudaMemcpy(_video_idx, d_video_idx, L * K * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_PN, d_PN, L * K * sizeof(PathNode), cudaMemcpyDeviceToHost);
    cudaMemcpy(res_q, d_res_q, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(res_p, d_res_p, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(result_score_path, d_res_scores, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(score, d_maxscore_list, video_num * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(match, d_match, video_num * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_ref_index);
    cudaFree(d_ref_score);
    cudaFree(d_video_idx);
    cudaFree(d_PN);
    cudaFree(d_lastidx_list);
    cudaFree(d_last_queryidx_list);
    cudaFree(d_maxscore_list);
    cudaFree(d_res_q);
    cudaFree(d_res_p);
    cudaFree(d_res_scores);
    cudaFree(d_match);
}

void foo(int* arr2d, int L, int K) {
    printf("fun called!\n");
    for(int i = 0 ; i < L; i++) {
        for (int j = 0 ; j < K; j++) {
            ARR_I_J(arr2d, i, j) = 100;
            printf("%d ", ARR_I_J(arr2d, i, j));
        }
        printf("\n");
    }
}

#ifdef __cplusplus
}
#endif
59b65c650551af82cad5b279e1e2c3f51dc45107.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <THH/THH.h> namespace at { namespace native { using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. bool same_type = iter.dtype(0) == iter.dtype(1); bool memcpy_eligible = same_type && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); HIPGuardMasqueradingAsCUDA device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // hipMemcpyAsync on the default stream. HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { // Perform the copy AT_CUDA_CHECK(hipMemcpyAsync( iter.data_ptr(0), iter.data_ptr(1), numel * iter.element_size(0), hipMemcpyDeviceToDevice, copy_stream)); } else { iter.dynamic_cast_if(true); AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] { gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; }); }); } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); } AT_CUDA_CHECK(hipGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use hipMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. 
For example, this includes // non-contiguous copies between CPU and GPU. return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it invovles the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; hipMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = hipMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = hipMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream)); if (non_blocking) { void* ptr = (dst_device == kCPU ? dst : src); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream)); } else { AT_CUDA_CHECK(hipStreamSynchronize(stream)); } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
59b65c650551af82cad5b279e1e2c3f51dc45107.cu
#include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <THC/THC.h> namespace at { namespace native { using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. bool same_type = iter.dtype(0) == iter.dtype(1); bool memcpy_eligible = same_type && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); CUDAGuard device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // cudaMemcpyAsync on the default stream. CUDAStream copy_stream = getCurrentCUDAStream(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentCUDAStream(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { // Perform the copy AT_CUDA_CHECK(cudaMemcpyAsync( iter.data_ptr(0), iter.data_ptr(1), numel * iter.element_size(0), cudaMemcpyDeviceToDevice, copy_stream)); } else { iter.dynamic_cast_if(true); AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] { gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; }); }); } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentCUDAStream(dst_device.index())); } AT_CUDA_CHECK(cudaGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use cudaMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. For example, this includes // non-contiguous copies between CPU and GPU. 
return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it invovles the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU cuda::OptionalCUDAGuard device_guard; cudaMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = cudaMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = cudaMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); CUDAStream stream = getCurrentCUDAStream(); AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); if (non_blocking) { void* ptr = (dst_device == kCPU ? dst : src); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream)); } else { AT_CUDA_CHECK(cudaStreamSynchronize(stream)); } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
9d739e6ee4da2735f19a5a79514c926e55858ea3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // old op include, fluid should be removed #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <hipcub/hipcub.hpp> #endif #include <vector> #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/reduce_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/process_group.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif // trace op include #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename Context> void GetClassInterval(const gpuStream_t& stream, const phi::Place& place, const Context& dev_ctx, const int rid, const int rank, const int nranks, const int D, DenseTensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { paddle::framework::TensorFromVector(shard_dim_vec, dev_ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) DenseTensor num_classes_per_device; paddle::framework::TensorFromVector( shard_dim_vec, dev_ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup paddle::distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType( num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } class_interval->Resize({nranks + 1}); auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval); size_t cub_temp_storage_bytes = 0; 
hipcub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream); auto cub_temp_storage = paddle::memory::Alloc(place, cub_temp_storage_bytes); hipcub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(), cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void AddMarginToPositiveLogitsKernel(T* logit, const IndexT* label, const float margin1, const float margin2, const float margin3, const int rank, const int nranks, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; int end_index = class_interval_ptr[rank + 1]; int num_classes = class_interval_ptr[nranks]; CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the number of class. It should " "be less than [%d], but received [%d]", num_classes, real_label); if (real_label >= start_index && real_label < end_index) { int64_t offset = i * D + real_label - start_index; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType x = static_cast<MPType>(logit[offset]); MPType theta = acos(x); if (fabs(margin1 - 1.0) > 1e-8) { theta *= static_cast<MPType>(margin1); } if (fabs(margin2) > 1e-8) { theta += static_cast<MPType>(margin2); } logit[offset] = static_cast<T>(cos(theta)); } if (fabs(margin3) > 1e-8) { MPType y = static_cast<MPType>(logit[offset]); y -= static_cast<MPType>(margin3); logit[offset] = static_cast<T>(y); } } } } template <typename T> __global__ void ScaleLogitKernel(T* logits, const float scale, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); } } template <typename T> __global__ void LogitsMinusMaxKernel(T* logits, const T* logits_max_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= logits_max_per_row[row]; } } template <typename T> __global__ void LogitsMinusLogSumKernel(T* logits, const T* logits_sum_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= phi::kps::details::Log(logits_sum_per_row[row]); } } template <typename T, typename IndexT> __global__ void HardLabelSoftmaxWithCrossEntropyKernel( T* loss, T* log_softmax, const IndexT* labels, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { auto softmax = log_softmax[i]; loss[row] = -softmax; log_softmax[i] = phi::kps::details::Exp(softmax); } else { log_softmax[i] = phi::kps::details::Exp(log_softmax[i]); } } } template <typename T, typename Context> void MarginCrossEntropyKernel(const Context& dev_ctx, const DenseTensor& logits, const DenseTensor& labels, bool return_softmax, int ring_id, int rank, int nranks, float margin1, float margin2, float margin3, float scale, DenseTensor* softmax, DenseTensor* loss) { const auto& place = dev_ctx.GetPlace(); // old code #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) paddle::platform::NCCLComm* comm; paddle::distributed::ProcessGroup* pg = nullptr; gpuStream_t stream; if (nranks > 1) { auto map = 
paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { // Use ProcessGroup pg = map->get(ring_id); } else { comm = paddle::platform::NCCLCommContext::Instance().Get(ring_id, place); // use global calculate stream stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); } } #endif // allocate memory on device. T* softmax_ptr = dev_ctx.template Alloc<T>(softmax); T* loss_ptr = dev_ctx.template Alloc<T>(loss); const auto& logits_dims = logits.dims(); const auto& labels_dims = labels.dims(); const int axis = logits_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, logits_dims); const int D = phi::funcs::SizeFromAxis(axis, logits_dims); int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = paddle::framework::TransToProtoVarType(labels.dtype()); // copy logits to softmax variable since we can't modify logits, // and it also be used when calculate grad phi::Copy<Context>(dev_ctx, logits, dev_ctx.GetPlace(), true, softmax); DenseTensor softmax_2d; softmax_2d.ShareDataWith(*softmax).Resize({N, D}); T* logits_ptr = softmax_2d.data<T>(); DenseTensor class_interval; GetClassInterval<T, Context>(dev_ctx.stream(), dev_ctx.GetPlace(), dev_ctx, ring_id, rank, nranks, D, &class_interval); // step 1, preprocess logits // add margin for positive elements // theta = acos(x_i) // (cos(m1 * theta + m2) - m3) // save match_logits, used for gradient computation. if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel<T>) , dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, labels.data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel<T>) , dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, labels.data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else { PADDLE_THROW(errors::Unimplemented( "margin_cross_entropy label type noly support int32 and int64, " "but got %s", label_type)); } // scale by s hipLaunchKernelGGL(( ScaleLogitKernel<T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, scale, N, D); // step 2, obtain logit_max DenseTensor logits_max; logits_max.Resize({N, 1}); dev_ctx.template Alloc<T>(&logits_max); T* logits_max_buff = dev_ctx.template Alloc<T>(&logits_max); phi::funcs:: ReduceKernel<T, T, phi::kps::MaxFunctor, phi::kps::IdentityFunctor<T>>( static_cast<const phi::GPUContext&>(dev_ctx), softmax_2d, &logits_max, phi::kps::IdentityFunctor<T>(), {1}); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(logits_max); out_tensor.push_back(logits_max); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::MAX; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(logits_max.dtype())), ncclMax, comm->comm(), stream)); } } #endif // step 3, logit - logit_max hipLaunchKernelGGL(( LogitsMinusMaxKernel<T>), dim3(NumBlocks(N * D)), 
dim3(threads), 0, dev_ctx.stream(), logits_ptr, logits_max_buff, N, D); // step 4, sum(exp(logit - logit_max)) DenseTensor sum_exp_logits; sum_exp_logits.Resize({N, 1}); dev_ctx.template Alloc<T>(&sum_exp_logits); T* sum_exp_logits_buff = dev_ctx.template Alloc<T>(&sum_exp_logits); phi::funcs::ReduceKernel<T, T, phi::kps::AddFunctor, phi::kps::ExpFunctor<T>>( static_cast<const phi::GPUContext&>(dev_ctx), softmax_2d, &sum_exp_logits, phi::kps::ExpFunctor<T>(), {1}); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(sum_exp_logits); out_tensor.push_back(sum_exp_logits); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(sum_exp_logits.dtype())), ncclSum, comm->comm(), stream)); } } #endif // step 5, (logit - logit_max) - log(sum(exp(logit - logit_max))) hipLaunchKernelGGL(( LogitsMinusLogSumKernel<T>) , dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(), logits_ptr, sum_exp_logits_buff, N, D); // step 6, prob = exp((logit - logit_max) - log(sum(exp(logit - // logit_max)))) // loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max)))) phi::funcs::SetConstant<Context, T> functor; functor(dev_ctx, loss, static_cast<T>(0.0)); if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr, logits_ptr, labels.data<LabelT>(), rank, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr, logits_ptr, labels.data<LabelT>(), rank, N, D, class_interval.data<int>()); } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(*loss); out_tensor.push_back(*loss); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( loss_ptr, loss_ptr, loss->numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(loss->dtype())), ncclSum, comm->comm(), stream)); } } #endif } } // namespace phi PD_REGISTER_KERNEL(margin_cross_entropy, GPU, ALL_LAYOUT, phi::MarginCrossEntropyKernel, float, double, phi::dtype::float16) {}
9d739e6ee4da2735f19a5a79514c926e55858ea3.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // old op include, fluid should be removed #ifdef PADDLE_WITH_HIP #include <hipcub/hipcub.hpp> namespace cub = hipcub; #else #include <cub/cub.cuh> #endif #include <vector> #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/kernels/funcs/axis_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/reduce_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/process_group.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif // trace op include #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename Context> void GetClassInterval(const gpuStream_t& stream, const phi::Place& place, const Context& dev_ctx, const int rid, const int rank, const int nranks, const int D, DenseTensor* class_interval) { std::vector<int> shard_dim_vec(nranks + 1, 0); shard_dim_vec[rank + 1] = D; if (nranks <= 1) { paddle::framework::TensorFromVector(shard_dim_vec, dev_ctx, class_interval); return; } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) DenseTensor num_classes_per_device; paddle::framework::TensorFromVector( shard_dim_vec, dev_ctx, &num_classes_per_device); int* num_classes_per_device_ptr = num_classes_per_device.data<int>(); auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(rid)) { // Use ProcessGroup paddle::distributed::ProcessGroup* pg = map->get(rid); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(num_classes_per_device); out_tensor.push_back(num_classes_per_device); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { const auto& comm = paddle::platform::NCCLCommContext::Instance().Get(rid, place); // use global calculate stream const auto calcu_stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( num_classes_per_device_ptr, num_classes_per_device_ptr, num_classes_per_device.numel(), paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType( num_classes_per_device.dtype())), ncclSum, comm->comm(), calcu_stream)); } class_interval->Resize({nranks + 1}); auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval); size_t cub_temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum<int*, int*>( nullptr, cub_temp_storage_bytes, nullptr, nullptr, 
nranks + 1, stream); auto cub_temp_storage = paddle::memory::Alloc(place, cub_temp_storage_bytes); cub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(), cub_temp_storage_bytes, num_classes_per_device_ptr, class_interval_ptr, nranks + 1, stream); return; #endif } template <typename T, typename IndexT> __global__ void AddMarginToPositiveLogitsKernel(T* logit, const IndexT* label, const float margin1, const float margin2, const float margin3, const int rank, const int nranks, const int64_t N, const int64_t D, const int* class_interval_ptr) { using MPType = typename phi::dtype::MPTypeTrait<T>::Type; int start_index = class_interval_ptr[rank]; int end_index = class_interval_ptr[rank + 1]; int num_classes = class_interval_ptr[nranks]; CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the number of class. It should " "be less than [%d], but received [%d]", num_classes, real_label); if (real_label >= start_index && real_label < end_index) { int64_t offset = i * D + real_label - start_index; if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) { MPType x = static_cast<MPType>(logit[offset]); MPType theta = acos(x); if (fabs(margin1 - 1.0) > 1e-8) { theta *= static_cast<MPType>(margin1); } if (fabs(margin2) > 1e-8) { theta += static_cast<MPType>(margin2); } logit[offset] = static_cast<T>(cos(theta)); } if (fabs(margin3) > 1e-8) { MPType y = static_cast<MPType>(logit[offset]); y -= static_cast<MPType>(margin3); logit[offset] = static_cast<T>(y); } } } } template <typename T> __global__ void ScaleLogitKernel(T* logits, const float scale, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); } } template <typename T> __global__ void LogitsMinusMaxKernel(T* logits, const T* logits_max_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= logits_max_per_row[row]; } } template <typename T> __global__ void LogitsMinusLogSumKernel(T* logits, const T* logits_sum_per_row, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; logits[i] -= phi::kps::details::Log(logits_sum_per_row[row]); } } template <typename T, typename IndexT> __global__ void HardLabelSoftmaxWithCrossEntropyKernel( T* loss, T* log_softmax, const IndexT* labels, const int rank, const int64_t N, const int64_t D, const int* class_interval_ptr) { int start_index = class_interval_ptr[rank]; CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { auto softmax = log_softmax[i]; loss[row] = -softmax; log_softmax[i] = phi::kps::details::Exp(softmax); } else { log_softmax[i] = phi::kps::details::Exp(log_softmax[i]); } } } template <typename T, typename Context> void MarginCrossEntropyKernel(const Context& dev_ctx, const DenseTensor& logits, const DenseTensor& labels, bool return_softmax, int ring_id, int rank, int nranks, float margin1, float margin2, float margin3, float scale, DenseTensor* softmax, DenseTensor* loss) { const auto& place = dev_ctx.GetPlace(); // old code #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) paddle::platform::NCCLComm* comm; paddle::distributed::ProcessGroup* pg = nullptr; gpuStream_t stream; if (nranks > 1) { auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { // Use ProcessGroup pg = map->get(ring_id); } else { comm = 
paddle::platform::NCCLCommContext::Instance().Get(ring_id, place); // use global calculate stream stream = static_cast<GPUContext*>( paddle::platform::DeviceContextPool::Instance().Get(place)) ->stream(); } } #endif // allocate memory on device. T* softmax_ptr = dev_ctx.template Alloc<T>(softmax); T* loss_ptr = dev_ctx.template Alloc<T>(loss); const auto& logits_dims = logits.dims(); const auto& labels_dims = labels.dims(); const int axis = logits_dims.size() - 1; const int N = phi::funcs::SizeToAxis(axis, logits_dims); const int D = phi::funcs::SizeFromAxis(axis, logits_dims); int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = paddle::framework::TransToProtoVarType(labels.dtype()); // copy logits to softmax variable since we can't modify logits, // and it also be used when calculate grad phi::Copy<Context>(dev_ctx, logits, dev_ctx.GetPlace(), true, softmax); DenseTensor softmax_2d; softmax_2d.ShareDataWith(*softmax).Resize({N, D}); T* logits_ptr = softmax_2d.data<T>(); DenseTensor class_interval; GetClassInterval<T, Context>(dev_ctx.stream(), dev_ctx.GetPlace(), dev_ctx, ring_id, rank, nranks, D, &class_interval); // step 1, preprocess logits // add margin for positive elements // theta = acos(x_i) // (cos(m1 * theta + m2) - m3) // save match_logits, used for gradient computation. if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; AddMarginToPositiveLogitsKernel<T> <<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>( logits_ptr, labels.data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; AddMarginToPositiveLogitsKernel<T> <<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>( logits_ptr, labels.data<LabelT>(), margin1, margin2, margin3, rank, nranks, N, D, class_interval.data<int>()); } else { PADDLE_THROW(errors::Unimplemented( "margin_cross_entropy label type noly support int32 and int64, " "but got %s", label_type)); } // scale by s ScaleLogitKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, scale, N, D); // step 2, obtain logit_max DenseTensor logits_max; logits_max.Resize({N, 1}); dev_ctx.template Alloc<T>(&logits_max); T* logits_max_buff = dev_ctx.template Alloc<T>(&logits_max); phi::funcs:: ReduceKernel<T, T, phi::kps::MaxFunctor, phi::kps::IdentityFunctor<T>>( static_cast<const phi::GPUContext&>(dev_ctx), softmax_2d, &logits_max, phi::kps::IdentityFunctor<T>(), {1}); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(logits_max); out_tensor.push_back(logits_max); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::MAX; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(logits_max.dtype())), ncclMax, comm->comm(), stream)); } } #endif // step 3, logit - logit_max LogitsMinusMaxKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, logits_max_buff, N, D); // step 4, sum(exp(logit - logit_max)) DenseTensor sum_exp_logits; sum_exp_logits.Resize({N, 1}); dev_ctx.template Alloc<T>(&sum_exp_logits); T* sum_exp_logits_buff = dev_ctx.template 
Alloc<T>(&sum_exp_logits); phi::funcs::ReduceKernel<T, T, phi::kps::AddFunctor, phi::kps::ExpFunctor<T>>( static_cast<const phi::GPUContext&>(dev_ctx), softmax_2d, &sum_exp_logits, phi::kps::ExpFunctor<T>(), {1}); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(sum_exp_logits); out_tensor.push_back(sum_exp_logits); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(sum_exp_logits.dtype())), ncclSum, comm->comm(), stream)); } } #endif // step 5, (logit - logit_max) - log(sum(exp(logit - logit_max))) LogitsMinusLogSumKernel<T> <<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>( logits_ptr, sum_exp_logits_buff, N, D); // step 6, prob = exp((logit - logit_max) - log(sum(exp(logit - // logit_max)))) // loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max)))) phi::funcs::SetConstant<Context, T> functor; functor(dev_ctx, loss, static_cast<T>(0.0)); if (label_type == paddle::framework::proto::VarType::INT32) { typedef int32_t LabelT; HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT> <<<blocks, threads, 0, dev_ctx.stream()>>>(loss_ptr, logits_ptr, labels.data<LabelT>(), rank, N, D, class_interval.data<int>()); } else if (label_type == paddle::framework::proto::VarType::INT64) { typedef int64_t LabelT; HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT> <<<blocks, threads, 0, dev_ctx.stream()>>>(loss_ptr, logits_ptr, labels.data<LabelT>(), rank, N, D, class_interval.data<int>()); } #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) if (nranks > 1) { if (pg) { std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(*loss); out_tensor.push_back(*loss); paddle::distributed::AllreduceOptions opts; opts.reduce_op = paddle::distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce( loss_ptr, loss_ptr, loss->numel(), paddle::platform::ToNCCLDataType( paddle::framework::TransToProtoVarType(loss->dtype())), ncclSum, comm->comm(), stream)); } } #endif } } // namespace phi PD_REGISTER_KERNEL(margin_cross_entropy, GPU, ALL_LAYOUT, phi::MarginCrossEntropyKernel, float, double, phi::dtype::float16) {}
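/* Illustrative sketch, not part of the file above: for the ground-truth class,
   the forward pass applies cos(m1 * acos(x) + m2) - m3 to the logit
   (AddMarginToPositiveLogitsKernel) and then scales every logit by s
   (ScaleLogitKernel). The helper below is a hypothetical host-side restatement
   of that per-element math for a single cosine value; the name margin_logit is
   not part of the Paddle API. */
#include <cmath>

// Margin transform for one target-class logit x = cos(theta). In the kernels
// above this is applied only when the label falls inside the current rank's
// class interval; the scale by s is applied to all logits.
inline float margin_logit(float x, float m1, float m2, float m3, float s) {
  float theta = std::acos(x);            // theta = acos(x)
  float y = std::cos(m1 * theta + m2);   // cos(m1 * theta + m2)
  return s * (y - m3);                   // subtract m3, then scale by s
}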
1fd5b7ca5162453132cf541063009006a2694ffa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2015 by Contributors * \file pad.cu * \brief * \author Sebastian Bodenstein */ #include <algorithm> #include "./pad-inl.h" #include "../common/cuda_utils.h" namespace mshadow { namespace cuda { //////////////////////////////////////////////////////////////////////////////// // Special Case: 2d image (so only pad width + height) // Case 1: Replication Padding // single_image_2d_edge adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu template <int n_bits, typename DType> __global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } int outputPointX = outputPointId % dst.size(3); int outputPointY = outputPointId / dst.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY; DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_edge(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_2d_pad_edge_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padT, padL); } template <int n_bits, typename DType> __global__ void image_2d_pad_edge_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); hipLaunchKernelGGL(( image_2d_pad_edge_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, 
grad_in, grad_out, padT, padL); } // Case 2: Constant Padding template <int n_bits, typename DType> __global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL, const DType constant) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } // cast sizes to int to use in min/max int Ny = src.size(2); int Nx = src.size(3); int plane = blockIdx.y; int batch = blockIdx.z; int outputPointX = outputPointId % dst.size(3); int outputPointY = outputPointId / dst.size(3); int checkT = max(0, outputPointY - padT + 1); int checkB = max(0, padT + Ny - outputPointY); int checkL = max(0, outputPointX - padL + 1); int checkR = max(0, padL + Nx - outputPointX); int inputPointX = min(max(outputPointX - padL, 0), Nx - 1); int inputPointY = min(max(outputPointY - padT, 0), Ny - 1); // 1 if need padding, 0 if not int need_pad = !(checkT * checkB * checkL * checkR); DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy * (!need_pad) + need_pad * constant; } template <typename DType> inline void image_pad_constant(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad, const DType constant) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_2d_pad_constant_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padT, padL, constant); } template <int n_bits, typename DType> __global__ void image_2d_pad_constant_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int inPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; int pixel_num = grad_in.size(2) * grad_in.size(3); if (inPointId >= pixel_num) { return; } int inPointX = inPointId % grad_in.size(3); int inPointY = inPointId / grad_in.size(3); int outPointX = inPointX + padL; int outPointY = inPointY + padT; grad_in[batch][plane][inPointY][inPointX] = grad_out[batch][plane][outPointY][outPointX]; } template <typename DType> inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_); hipLaunchKernelGGL(( image_2d_pad_constant_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, grad_in, grad_out, padT, padL); } // Case 3: Reflection Padding // adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReflectionPadding.cu template <int n_bits, typename DType> __global__ void image_2d_pad_reflect_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } int outputPointX = outputPointId % 
dst.size(3); int outputPointY = outputPointId / dst.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, src.size(3) + padL - 1, 0) - outputPointX + 2 * padL + src.size(3) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, src.size(2) + padT - 1, 0) - outputPointY + 2 * padT + src.size(2) - 1 - oStartY + iStartY; DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_reflect(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_2d_pad_reflect_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padT, padL); } template <int n_bits, typename DType> __global__ void image_2d_pad_reflect_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(3) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(3) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(2) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(2) - 1 - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_reflect_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); hipLaunchKernelGGL(( image_2d_pad_reflect_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, grad_in, grad_out, padT, padL); } //////////////////////////////////////////////////////////////////////////////// // Special Case: 3d image (pad depth + width + height) // Case 1: Replication Padding // single_image_3_edge adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu template <int n_bits, typename DType> __global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * 
dst.size(3) * dst.size(4)) { return; } int outputPointX = outputPointId % dst.size(4); int outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = min(max(padL, outputPointX), src.size(4) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), src.size(3) + padT - 1) - oStartY + iStartY; int inputPointZ = min(max(padF, outputPointZ), src.size(2) + padF - 1) - oStartZ + iStartZ; DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_edge(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_3d_pad_edge_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padF, padT, padL); } template <int n_bits, typename DType> __global__ void image_3d_pad_edge_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) { return; } int outputPointX = outputPointId % grad_out.size(4); int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3); int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) - oStartY + iStartY; int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) - oStartZ + iStartZ; DType valueToCopy = grad_out[batch][plane][outputPointZ][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); hipLaunchKernelGGL(( image_3d_pad_edge_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, grad_in, grad_out, padF, padT, padL); } // Case 2: Constant Padding template <int n_bits, typename DType> __global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, 
const int padL, const DType constant) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) { return; } // cast sizes to int to use in min/max int Nz = src.size(2); int Ny = src.size(3); int Nx = src.size(4); int plane = blockIdx.y; int batch = blockIdx.z; int outputPointX = outputPointId % dst.size(4); int outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int checkFront = max(0, outputPointZ - padF + 1); int checkBack = max(0, padF + Nz - outputPointZ); int checkTop = max(0, outputPointY - padT + 1); int checkBottom = max(0, padT + Ny - outputPointY); int checkLeft = max(0, outputPointX - padL + 1); int checkRight = max(0, padL + Nx - outputPointX); int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1); int inputPointX = min(max(outputPointX - padL, 0), Nx - 1); int inputPointY = min(max(outputPointY - padT, 0), Ny - 1); // 1 if need padding, 0 if not int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft * checkRight); DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy * (!need_pad) + need_pad * constant; } template <typename DType> inline void image_pad_constant(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad, const DType constant) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_3d_pad_constant_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padF, padT, padL, constant); } template <int n_bits, typename DType> __global__ void image_3d_pad_constant_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int inPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; int pixel_num = grad_in.size(2) * grad_in.size(3) * grad_in.size(4); if (inPointId >= pixel_num) { return; } int inPointX = inPointId % grad_in.size(4); int inPointY = (inPointId / grad_in.size(4)) % grad_in.size(3); int inPointZ = inPointId / (grad_in.size(3) * grad_in.size(4)); int outPointZ = inPointZ + padF; int outPointX = inPointX + padL; int outPointY = inPointY + padT; grad_in[batch][plane][inPointZ][inPointY][inPointX] = grad_out[batch][plane][outPointZ][outPointY][outPointX]; } template <typename DType> inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_); hipLaunchKernelGGL(( image_3d_pad_constant_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, grad_in, grad_out, padF, padT, padL); } // Case 3: Reflection Padding template <int n_bits, typename DType> __global__ void 
image_3d_pad_reflect_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) { return; } int outputPointX = outputPointId % dst.size(4); int outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, src.size(4) + padL - 1, 0) - outputPointX + 2 * padL + src.size(4) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, src.size(3) + padT - 1, 0) - outputPointY + 2 * padT + src.size(3) - 1 - oStartY + iStartY; int inputPointZ = __sad(outputPointZ, padF, 0) - __sad(outputPointZ, src.size(2) + padF - 1, 0) - outputPointZ + 2 * padF + src.size(2) - 1 - oStartZ + iStartZ; DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_reflect(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(dst.stream_); hipLaunchKernelGGL(( image_3d_pad_reflect_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src, padF, padT, padL); } template <int n_bits, typename DType> __global__ void image_3d_pad_reflect_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) { return; } int outputPointX = outputPointId % grad_out.size(4); int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3); int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(4) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(4) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(3) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(3) - 1 - oStartY + iStartY; int inputPointZ = __sad(outputPointZ, padF, 0) - __sad(outputPointZ, grad_in.size(2) + padF - 1, 0) - outputPointZ + 2 * padF + grad_in.size(2) - 1 - oStartZ + iStartZ; DType valueToCopy = grad_out[batch][plane][outputPointZ][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } /* int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * 
grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(3) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(3) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(2) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(2) - 1 - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);*/ template <typename DType> inline void image_pad_reflect_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); hipLaunchKernelGGL(( image_3d_pad_reflect_grad_kernel<kBaseThreadBits, DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, grad_in, grad_out, padF, padT, padL); } //////////////////////////////////////////////////////////////////////////////// } // namespace cuda template <int dim, typename DType> void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src, const mxnet::TShape pad, int mode, const DType constant_value) { switch (mode) { case mxnet::op::pad_enum::kEdge: cuda::image_pad_edge(dst, src, pad); break; case mxnet::op::pad_enum::kConstant: cuda::image_pad_constant(dst, src, pad, constant_value); break; case mxnet::op::pad_enum::kReflect: cuda::image_pad_reflect(dst, src, pad); break; } } template <int dim, typename DType> void pad_image_grad(Tensor<gpu, dim, DType> grad_in, const Tensor<gpu, dim, DType> grad_out, const mxnet::TShape pad, int mode) { switch (mode) { case mxnet::op::pad_enum::kEdge: cuda::image_pad_edge_grad(grad_in, grad_out, pad); break; case mxnet::op::pad_enum::kConstant: cuda::image_pad_constant_grad(grad_in, grad_out, pad); break; case mxnet::op::pad_enum::kReflect: cuda::image_pad_reflect_grad(grad_in, grad_out, pad); break; } } } // namespace mshadow //////////////////////////////////////////////////////////////////////////////// namespace mxnet { namespace op { template <> Operator *CreateOp<gpu>(PadParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
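/* Illustrative sketch, not part of the file above: the replication ("edge")
   kernels map a destination coordinate to a source coordinate by clamping.
   For pad >= 0, min(max(pad, x), N + pad - 1) - pad is simply
   clamp(x - pad, 0, N - 1). The helper name edge_index is hypothetical. */
#include <algorithm>

// Source index for destination coordinate x on an axis of source length N,
// with `pad` leading elements of edge (replication) padding.
inline int edge_index(int x, int pad, int N) {
  return std::min(std::max(x - pad, 0), N - 1);  // clamp into [0, N-1]
}

// e.g. with N = 4, pad = 2, x = 0..7 maps to 0, 0, 0, 1, 2, 3, 3, 3.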
1fd5b7ca5162453132cf541063009006a2694ffa.cu
/*! * Copyright (c) 2015 by Contributors * \file pad.cu * \brief * \author Sebastian Bodenstein */ #include <algorithm> #include "./pad-inl.h" #include "../common/cuda_utils.h" namespace mshadow { namespace cuda { //////////////////////////////////////////////////////////////////////////////// // Special Case: 2d image (so only pad width + height) // Case 1: Replication Padding // single_image_2d_edge adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu template <int n_bits, typename DType> __global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } int outputPointX = outputPointId % dst.size(3); int outputPointY = outputPointId / dst.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY; DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_edge(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_2d_pad_edge_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src, padT, padL); } template <int n_bits, typename DType> __global__ void image_2d_pad_edge_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); image_2d_pad_edge_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padT, padL); } // Case 2: Constant Padding template <int n_bits, typename DType> __global__ void 
image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL, const DType constant) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } // cast sizes to int to use in min/max int Ny = src.size(2); int Nx = src.size(3); int plane = blockIdx.y; int batch = blockIdx.z; int outputPointX = outputPointId % dst.size(3); int outputPointY = outputPointId / dst.size(3); int checkT = max(0, outputPointY - padT + 1); int checkB = max(0, padT + Ny - outputPointY); int checkL = max(0, outputPointX - padL + 1); int checkR = max(0, padL + Nx - outputPointX); int inputPointX = min(max(outputPointX - padL, 0), Nx - 1); int inputPointY = min(max(outputPointY - padT, 0), Ny - 1); // 1 if need padding, 0 if not int need_pad = !(checkT * checkB * checkL * checkR); DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy * (!need_pad) + need_pad * constant; } template <typename DType> inline void image_pad_constant(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad, const DType constant) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_2d_pad_constant_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( dst, src, padT, padL, constant); } template <int n_bits, typename DType> __global__ void image_2d_pad_constant_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int inPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; int pixel_num = grad_in.size(2) * grad_in.size(3); if (inPointId >= pixel_num) { return; } int inPointX = inPointId % grad_in.size(3); int inPointY = inPointId / grad_in.size(3); int outPointX = inPointX + padL; int outPointY = inPointY + padT; grad_in[batch][plane][inPointY][inPointX] = grad_out[batch][plane][outPointY][outPointX]; } template <typename DType> inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_); image_2d_pad_constant_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padT, padL); } // Case 3: Reflection Padding // adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReflectionPadding.cu template <int n_bits, typename DType> __global__ void image_2d_pad_reflect_kernel(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> src, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3)) { return; } int outputPointX = outputPointId % dst.size(3); int outputPointY = outputPointId / dst.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, 
padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, src.size(3) + padL - 1, 0) - outputPointX + 2 * padL + src.size(3) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, src.size(2) + padT - 1, 0) - outputPointY + 2 * padT + src.size(2) - 1 - oStartY + iStartY; DType valueToCopy = src[batch][plane][inputPointY][inputPointX]; dst[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_reflect(Tensor<gpu, 4, DType> dst, const Tensor<gpu, 4, DType> &src, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_2d_pad_reflect_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src, padT, padL); } template <int n_bits, typename DType> __global__ void image_2d_pad_reflect_grad_kernel( Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(3) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(3) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(2) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(2) - 1 - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_reflect_grad(Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> &grad_out, const mxnet::TShape &pad) { const int padT = pad[4]; const int padL = pad[6]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); image_2d_pad_reflect_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padT, padL); } //////////////////////////////////////////////////////////////////////////////// // Special Case: 3d image (pad depth + width + height) // Case 1: Replication Padding // single_image_3_edge adapted from Torch // https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu template <int n_bits, typename DType> __global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) { return; } int outputPointX = outputPointId % dst.size(4); int outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int iStartX = max(0, 
-padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = min(max(padL, outputPointX), src.size(4) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), src.size(3) + padT - 1) - oStartY + iStartY; int inputPointZ = min(max(padF, outputPointZ), src.size(2) + padF - 1) - oStartZ + iStartZ; DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_edge(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_3d_pad_edge_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( dst, src, padF, padT, padL); } template <int n_bits, typename DType> __global__ void image_3d_pad_edge_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) { return; } int outputPointX = outputPointId % grad_out.size(4); int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3); int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) - oStartX + iStartX; int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) - oStartY + iStartY; int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) - oStartZ + iStartZ; DType valueToCopy = grad_out[batch][plane][outputPointZ][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } template <typename DType> inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); image_3d_pad_edge_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padF, padT, padL); } // Case 2: Constant Padding template <int n_bits, typename DType> __global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, const int padL, const DType constant) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) { return; } // cast sizes to int to use in min/max int Nz = src.size(2); int Ny = src.size(3); int Nx = src.size(4); int plane = 
blockIdx.y; int batch = blockIdx.z; int outputPointX = outputPointId % dst.size(4); int outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int checkFront = max(0, outputPointZ - padF + 1); int checkBack = max(0, padF + Nz - outputPointZ); int checkTop = max(0, outputPointY - padT + 1); int checkBottom = max(0, padT + Ny - outputPointY); int checkLeft = max(0, outputPointX - padL + 1); int checkRight = max(0, padL + Nx - outputPointX); int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1); int inputPointX = min(max(outputPointX - padL, 0), Nx - 1); int inputPointY = min(max(outputPointY - padT, 0), Ny - 1); // 1 if need padding, 0 if not int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft * checkRight); DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy * (!need_pad) + need_pad * constant; } template <typename DType> inline void image_pad_constant(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad, const DType constant) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_3d_pad_constant_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( dst, src, padF, padT, padL, constant); } template <int n_bits, typename DType> __global__ void image_3d_pad_constant_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int inPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; int pixel_num = grad_in.size(2) * grad_in.size(3) * grad_in.size(4); if (inPointId >= pixel_num) { return; } int inPointX = inPointId % grad_in.size(4); int inPointY = (inPointId / grad_in.size(4)) % grad_in.size(3); int inPointZ = inPointId / (grad_in.size(3) * grad_in.size(4)); int outPointZ = inPointZ + padF; int outPointX = inPointX + padL; int outPointY = inPointY + padT; grad_in[batch][plane][inPointZ][inPointY][inPointX] = grad_out[batch][plane][outPointZ][outPointY][outPointX]; } template <typename DType> inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_); image_3d_pad_constant_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padF, padT, padL); } // Case 3: Reflection Padding template <int n_bits, typename DType> __global__ void image_3d_pad_reflect_kernel(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> src, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) { return; } int outputPointX = outputPointId % dst.size(4); int 
outputPointY = (outputPointId / dst.size(4)) % dst.size(3); int outputPointZ = outputPointId / (dst.size(3) * dst.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, src.size(4) + padL - 1, 0) - outputPointX + 2 * padL + src.size(4) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, src.size(3) + padT - 1, 0) - outputPointY + 2 * padT + src.size(3) - 1 - oStartY + iStartY; int inputPointZ = __sad(outputPointZ, padF, 0) - __sad(outputPointZ, src.size(2) + padF - 1, 0) - outputPointZ + 2 * padF + src.size(2) - 1 - oStartZ + iStartZ; DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX]; dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename DType> inline void image_pad_reflect(Tensor<gpu, 5, DType> dst, const Tensor<gpu, 5, DType> &src, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, dst.size(1), dst.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_); image_3d_pad_reflect_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( dst, src, padF, padT, padL); } template <int n_bits, typename DType> __global__ void image_3d_pad_reflect_grad_kernel( Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out, const int padF, const int padT, const int padL) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) { return; } int outputPointX = outputPointId % grad_out.size(4); int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3); int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4)); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int iStartZ = max(0, -padF); int oStartX = max(0, padL); int oStartY = max(0, padT); int oStartZ = max(0, padF); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(4) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(4) - 1 - oStartX + iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(3) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(3) - 1 - oStartY + iStartY; int inputPointZ = __sad(outputPointZ, padF, 0) - __sad(outputPointZ, grad_in.size(2) + padF - 1, 0) - outputPointZ + 2 * padF + grad_in.size(2) - 1 - oStartZ + iStartZ; DType valueToCopy = grad_out[batch][plane][outputPointZ][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } /* int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= grad_out.size(2) * grad_out.size(3)) { return; } int outputPointX = outputPointId % grad_out.size(3); int outputPointY = outputPointId / grad_out.size(3); int iStartX = max(0, -padL); int iStartY = max(0, -padT); int oStartX = max(0, padL); int oStartY = max(0, padT); int inputPointX = __sad(outputPointX, padL, 0) - __sad(outputPointX, grad_in.size(3) + padL - 1, 0) - outputPointX + 2 * padL + grad_in.size(3) - 1 - oStartX + 
iStartX; int inputPointY = __sad(outputPointY, padT, 0) - __sad(outputPointY, grad_in.size(2) + padT - 1, 0) - outputPointY + 2 * padT + grad_in.size(2) - 1 - oStartY + iStartY; DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX]; atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);*/ template <typename DType> inline void image_pad_reflect_grad(Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> &grad_out, const mxnet::TShape &pad) { const int padF = pad[4]; const int padT = pad[6]; const int padL = pad[8]; dim3 dimBlock(kBaseThreadNum); int xGridSize = (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256; dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0)); CheckLaunchParam(dimGrid, dimBlock, "Pad"); cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_); image_3d_pad_reflect_grad_kernel<kBaseThreadBits, DType><<<dimGrid, dimBlock, 0, stream>>>( grad_in, grad_out, padF, padT, padL); } //////////////////////////////////////////////////////////////////////////////// } // namespace cuda template <int dim, typename DType> void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src, const mxnet::TShape pad, int mode, const DType constant_value) { switch (mode) { case mxnet::op::pad_enum::kEdge: cuda::image_pad_edge(dst, src, pad); break; case mxnet::op::pad_enum::kConstant: cuda::image_pad_constant(dst, src, pad, constant_value); break; case mxnet::op::pad_enum::kReflect: cuda::image_pad_reflect(dst, src, pad); break; } } template <int dim, typename DType> void pad_image_grad(Tensor<gpu, dim, DType> grad_in, const Tensor<gpu, dim, DType> grad_out, const mxnet::TShape pad, int mode) { switch (mode) { case mxnet::op::pad_enum::kEdge: cuda::image_pad_edge_grad(grad_in, grad_out, pad); break; case mxnet::op::pad_enum::kConstant: cuda::image_pad_constant_grad(grad_in, grad_out, pad); break; case mxnet::op::pad_enum::kReflect: cuda::image_pad_reflect_grad(grad_in, grad_out, pad); break; } } } // namespace mshadow //////////////////////////////////////////////////////////////////////////////// namespace mxnet { namespace op { template <> Operator *CreateOp<gpu>(PadParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
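/* Illustrative sketch, not part of the file above: the reflection kernels
   compute the source index with __sad(a, b, 0) = |a - b|. For pad >= 0 the
   expression |x - pad| - |x - (N + pad - 1)| - x + 2*pad + N - 1 - pad reduces
   to the usual "reflect without repeating the border" index. The helper names
   below are hypothetical and only mirror that arithmetic on the host for a
   single axis. */
#include <cstdlib>

// Reflection-padding source index, written the conventional way.
inline int reflect_index(int x, int pad, int N) {
  int i = std::abs(x - pad);            // distance from the start of the source row
  if (i > N - 1) i = 2 * (N - 1) - i;   // fold back at the far edge
  return i;
}

// The same value written the way the kernels above compute it (pad >= 0 case).
inline int reflect_index_sad_form(int x, int pad, int N) {
  return std::abs(x - pad) - std::abs(x - (N + pad - 1)) - x + 2 * pad + N - 1 - pad;
}

// e.g. with N = 4, pad = 2, x = 0..7 maps to 2, 1, 0, 1, 2, 3, 2, 1 in both forms.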
ba002e0a017df1958c16ced9d8edbfe46607428d.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>

#define Shared_Mem_Size 16*16*4

// CUDA kernel for tiled matrix multiplication
__global__ void tile_MatrixMul(int* a, int* b, int* c, int n, int tile_size) {
  // statically-sized shared memory
  __shared__ int A[Shared_Mem_Size];
  __shared__ int B[Shared_Mem_Size];

  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int bx = blockIdx.x;
  int by = blockIdx.y;

  // calculate global row and col positions for this thread
  int row = by * tile_size + ty;
  int col = bx * tile_size + tx;

  // intermediate sum for the element being written
  int temp_val = 0;

  // sweep tiles over the entire matrix
  for (int i = 0; i < (n / tile_size); i++) {
    /*
      Every thread in a threadblock loads one element into shared memory.
      The element location in shared memory corresponds to the thread's
      position in the threadblock (e.g. thread[0,0] loads
      A[0 * tile_size + 0] and B[0 * tile_size + 0]).

      Explanation of indexing parameters
      for A:
        row*n:        indexes the global row for this thread (loop invariant)
        i*tile_size:  indexes the new set of columns each iteration
        tx:           indexes the column within that set
      for B:
        col:            indexes the global column for this thread (loop invariant)
        i*tile_size*n:  indexes the next set of rows each iteration
        ty*n:           indexes the row within that set
    */
    A[(ty * tile_size) + tx] = a[row * n + (i * tile_size + tx)];
    B[(ty * tile_size) + tx] = b[(i * tile_size * n + ty * n) + col];

    // Ensure all threads have loaded their data before proceeding
    __syncthreads();

    // calculate all temp values for this tile
    for (int j = 0; j < tile_size; j++) {
      temp_val += A[(ty * tile_size) + j] * B[(j * tile_size) + tx];
    }

    // Ensure no thread runs ahead and stomps on shared-memory values
    // that other threads are still reading
    __syncthreads();
  }

  c[(row * n) + col] = temp_val;
}

// Initialize an n x n matrix with random values between 0 and 99
void Mat_init(int* a, int n) {
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      a[i * n + j] = rand() % 100;
    }
  }
}

// Check the MatrixMul result against a CPU reference
void check_answer(int* a, int* b, int* c, int n) {
  // calloc so the accumulators start at zero
  int* result = (int*)calloc(n * n, sizeof(int));
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      for (int k = 0; k < n; k++) {
        result[i * n + j] += a[i * n + k] * b[k * n + j];
      }
    }
  }
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      assert(c[i * n + j] == result[i * n + j]);
    }
  }
  free(result);
}

int main() {
  // matrix of size 1024 x 1024
  int n = 1 << 10;

  // host memory pointers
  int* h_a, * h_b, * h_c;

  // allocation size for each matrix
  size_t bytes = sizeof(int) * n * n;
  h_a = (int*)malloc(bytes);
  h_b = (int*)malloc(bytes);
  h_c = (int*)malloc(bytes);

  // device memory pointers
  int* d_a, * d_b, * d_c;
  hipMalloc(&d_a, bytes);
  hipMalloc(&d_b, bytes);
  hipMalloc(&d_c, bytes);

  // Initialize matrices a and b with random values between 0 and 99
  Mat_init(h_a, n);
  Mat_init(h_b, n);

  hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

  // Threadblock size
  int BLOCKS = 16;

  // Grid size (n is a multiple of BLOCKS here, so integer division is exact)
  int GRID = (int)ceil(n / BLOCKS);

  // use dim3 objects
  dim3 grid(GRID, GRID);
  dim3 threads(BLOCKS, BLOCKS);

  // Launch kernel on the default stream w/o dynamic shared memory
  hipLaunchKernelGGL((tile_MatrixMul), dim3(grid), dim3(threads), 0, 0, d_a, d_b, d_c, n, BLOCKS);

  // copy result back to host
  hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);

  // Check result for errors
  check_answer(h_a, h_b, h_c, n);

  free(h_a);
  free(h_b);
  free(h_c);

  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  printf("COMPLETED SUCCESSFULLY\n");
  return 0;
}
ba002e0a017df1958c16ced9d8edbfe46607428d.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>

#define Shared_Mem_Size 16*16*4

// CUDA kernel for tiled matrix multiplication
__global__ void tile_MatrixMul(int* a, int* b, int* c, int n, int tile_size) {
  // statically-sized shared memory
  __shared__ int A[Shared_Mem_Size];
  __shared__ int B[Shared_Mem_Size];

  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int bx = blockIdx.x;
  int by = blockIdx.y;

  // calculate global row and col positions for this thread
  int row = by * tile_size + ty;
  int col = bx * tile_size + tx;

  // intermediate sum for the element being written
  int temp_val = 0;

  // sweep tiles over the entire matrix
  for (int i = 0; i < (n / tile_size); i++) {
    /*
      Every thread in a threadblock loads one element into shared memory.
      The element location in shared memory corresponds to the thread's
      position in the threadblock (e.g. thread[0,0] loads
      A[0 * tile_size + 0] and B[0 * tile_size + 0]).

      Explanation of indexing parameters
      for A:
        row*n:        indexes the global row for this thread (loop invariant)
        i*tile_size:  indexes the new set of columns each iteration
        tx:           indexes the column within that set
      for B:
        col:            indexes the global column for this thread (loop invariant)
        i*tile_size*n:  indexes the next set of rows each iteration
        ty*n:           indexes the row within that set
    */
    A[(ty * tile_size) + tx] = a[row * n + (i * tile_size + tx)];
    B[(ty * tile_size) + tx] = b[(i * tile_size * n + ty * n) + col];

    // Ensure all threads have loaded their data before proceeding
    __syncthreads();

    // calculate all temp values for this tile
    for (int j = 0; j < tile_size; j++) {
      temp_val += A[(ty * tile_size) + j] * B[(j * tile_size) + tx];
    }

    // Ensure no thread runs ahead and stomps on shared-memory values
    // that other threads are still reading
    __syncthreads();
  }

  c[(row * n) + col] = temp_val;
}

// Initialize an n x n matrix with random values between 0 and 99
void Mat_init(int* a, int n) {
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      a[i * n + j] = rand() % 100;
    }
  }
}

// Check the MatrixMul result against a CPU reference
void check_answer(int* a, int* b, int* c, int n) {
  // calloc so the accumulators start at zero
  int* result = (int*)calloc(n * n, sizeof(int));
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      for (int k = 0; k < n; k++) {
        result[i * n + j] += a[i * n + k] * b[k * n + j];
      }
    }
  }
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
      assert(c[i * n + j] == result[i * n + j]);
    }
  }
  free(result);
}

int main() {
  // matrix of size 1024 x 1024
  int n = 1 << 10;

  // host memory pointers
  int* h_a, * h_b, * h_c;

  // allocation size for each matrix
  size_t bytes = sizeof(int) * n * n;
  h_a = (int*)malloc(bytes);
  h_b = (int*)malloc(bytes);
  h_c = (int*)malloc(bytes);

  // device memory pointers
  int* d_a, * d_b, * d_c;
  cudaMalloc(&d_a, bytes);
  cudaMalloc(&d_b, bytes);
  cudaMalloc(&d_c, bytes);

  // Initialize matrices a and b with random values between 0 and 99
  Mat_init(h_a, n);
  Mat_init(h_b, n);

  cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

  // Threadblock size
  int BLOCKS = 16;

  // Grid size (n is a multiple of BLOCKS here, so integer division is exact)
  int GRID = (int)ceil(n / BLOCKS);

  // use dim3 objects
  dim3 grid(GRID, GRID);
  dim3 threads(BLOCKS, BLOCKS);

  // Launch kernel on the default stream w/o dynamic shared memory
  tile_MatrixMul<<<grid, threads>>>(d_a, d_b, d_c, n, BLOCKS);

  // copy result back to host
  cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

  // Check result for errors
  check_answer(h_a, h_b, h_c, n);

  free(h_a);
  free(h_b);
  free(h_c);

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  printf("COMPLETED SUCCESSFULLY\n");
  return 0;
}
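/* Illustrative sketch, not part of the file above: tile_MatrixMul has no bounds
   checks, so it is only correct when n is an exact multiple of the tile size
   and the grid exactly covers the matrix (true for n = 1024, BLOCKS = 16 in
   main above). The helper below is hypothetical and simply derives the launch
   grid while enforcing that precondition. */
#include <cuda_runtime.h>
#include <cassert>

// Grid dimensions for an n x n matrix with square tiles of size `tile`.
inline dim3 matmul_grid(int n, int tile) {
  assert(n % tile == 0);   // the kernel loops n / tile times with no remainder handling
  int blocks = n / tile;   // one thread block per tile in each dimension
  return dim3(blocks, blocks);
}

// Usage (mirrors main above): dim3 grid = matmul_grid(1 << 10, 16);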
2c94290119a416698908163409f50d9c93e685ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_cudnn_op.cu.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using DataLayout = platform::DataLayout; using Tensor = framework::Tensor; // Wrapper of log function. Use log(float32) for float16 template <typename T> static __device__ __forceinline__ T Log(T x) { using AccT = typename details::MPTypeTrait<T>::Type; AccT logx = ::log(static_cast<AccT>(x)); return math::TolerableValue<T>()(static_cast<T>(logx)); } // Wrapper of exp function. Use exp(float32) for float16 template <typename T> static __device__ __forceinline__ T Exp(T x) { using AccT = typename details::MPTypeTrait<T>::Type; AccT expx = ::exp(static_cast<AccT>(x)); return math::TolerableValue<T>()(static_cast<T>(expx)); } // log2(value) static inline int Log2Ceil(int value) { int log2_value = 0; while ((1 << log2_value) < value) ++log2_value; return log2_value; } enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy }; /* Hard label cross entropy. */ template <typename T, bool IgnoreIndex> __global__ void CrossEntropyHardLabel(T* loss, const T* softmax, const int64_t* labels, const int n, const int dim, const int d, const int ignore_idx) { int64_t ids = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = ids / d; int64_t idx_d = ids % d; // thread ids compute loss[ids] using softmax[idx] if (ids < n * d) { if (labels[ids] < 0) { // label is negative loss[ids] = static_cast<T>(0.0); } else { // label is positive of zero int64_t idx = idx_n * dim * d + labels[ids] * d + idx_d; if (IgnoreIndex == true) { // IgnoreIndex is true if (labels[ids] == ignore_idx) { loss[ids] = static_cast<T>(0.0); } else { loss[ids] = -Log(softmax[idx]); } } else { // IgnoreIndex is false loss[ids] = -Log(softmax[idx]); } } } } /* Hard label cross entropy with exp. 
Input: log softmax Output: loss and exp(input) */ template <typename T, bool IgnoreIndex> __global__ void CrossEntropyExpHardLabel(T* loss, T* softmax, const int64_t* labels, const int n, const int dim, const int d, const int ignore_idx) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = idx / (d * dim); int64_t idx_dim = (idx / d) % dim; int64_t idx_d = idx % d; int64_t ids = idx_n * d + idx_d; if (idx < n * dim * d) { if (IgnoreIndex == true) { // IgnoreIndex is true if (idx_dim == labels[ids]) { if (labels[ids] == ignore_idx) { loss[ids] = static_cast<T>(0.0); } else { loss[ids] = -softmax[idx]; } } } else { // IgnoreIndex is false if (labels[ids] >= 0 && labels[ids] < dim) { if (labels[ids] == idx_dim) { loss[ids] = -softmax[idx]; } } else { loss[ids] = static_cast<T>(0.0); } } softmax[idx] = Exp(softmax[idx]); } } /* Core function of softmax with cross entropy forward - softmax, SoftmaxMode=kSoftmax - log softmax, SoftmaxMode=kLogSoftmax - softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy The computation includes - Compute max value: maxvalue_{i} = max_j src_{i,j} - Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}} - Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i} - Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i}) - Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label) This computation results from following formula: softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}} = e^{src_{i,j} - maxvalue_{i}} / sum_{j}{e^{src_{i,j} - maxvalue_{i}}} = e^{src_{i,j} - maxvalue_{i}} / s_{i} logsoftmax_{i,j} = log(softmax_{i,j}) = src_{i,j} - maxvalue_{i} - log(s_{i}) One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize). For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle api to compute max (sum) in one warp. */ template <typename T, typename VecT, typename AccT, int Log2Elements, SoftmaxMode mode, bool IgnoreIndex> __global__ void WarpSoftmaxForward(T* loss, T* softmax, const T* src, const int64_t* label, const int batch_size, const int stride, const int element_count, const int ignore_index) { constexpr int kDimCeil = 1 << Log2Elements; constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; constexpr int kVSize = sizeof(VecT) / sizeof(T); constexpr int kIterations = kDimCeil / kWarpSize; constexpr int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1; int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; // max index to read int idx_max_v[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; i++) { int idx_max = ((i + first_batch) < batch_size) ? 
element_count : 0; idx_max_v[i] = idx_max / kVSize; } // read data from global memory AccT srcdata[kBatchSize][kIterationsV][kVSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // read data to srcdata: - KVSize==1, - KVSize>1 #pragma unroll for (int it = 0; it < kIterationsV; ++it) { int src_idx = threadIdx.x + it * kWarpSize; if (kVSize == 1) { if (src_idx < idx_max_v[i]) { srcdata[i][it][0] = static_cast<AccT>(src[(first_batch + i) * stride + src_idx]); } else { srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity(); } } else { const VecT* src_v = reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]); if (src_idx < idx_max_v[i]) { VecT srctmp = src_v[src_idx]; const T* srcinptr = reinterpret_cast<const T*>(&srctmp); #pragma unroll for (int s = 0; s < kVSize; s++) { srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]); } } else { #pragma unroll for (int s = 0; s < kVSize; s++) { srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity(); } } } } } // compute max value: maxvalue_{i} = max_j src_{i,j} AccT max_value[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // it = 0 AccT valmax = srcdata[i][0][0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s]; } max_value[i] = valmax; // it = 1, 2, ... #pragma unroll for (int it = 1; it < kIterationsV; ++it) { AccT valmax = srcdata[i][it][0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s]; } max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax; } } WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value); // compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} } AccT sum[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // it = 0 if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] = ::exp(srcdata[i][0][0] - max_value[i]); } else { srcdata[i][0][0] = ::exp(srcdata[i][0][0] - max_value[i]); sum[i] = srcdata[i][0][0]; } #pragma unroll for (int s = 1; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] += ::exp(srcdata[i][0][s] - max_value[i]); } else { srcdata[i][0][s] = ::exp(srcdata[i][0][s] - max_value[i]); sum[i] += srcdata[i][0][s]; } } // it = 1, 2, ... 
#pragma unroll for (int it = 1; it < kIterationsV; ++it) { #pragma unroll for (int s = 0; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] += ::exp(srcdata[i][it][s] - max_value[i]); } else { srcdata[i][it][s] = ::exp(srcdata[i][it][s] - max_value[i]); sum[i] += srcdata[i][it][s]; } } } } WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum); // write data #pragma unroll for (int i = 0; i < kBatchSize; ++i) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] = ::log(sum[i]); } #pragma unroll for (int it = 0; it < kIterationsV; ++it) { int idx = threadIdx.x + it * kWarpSize; if (kVSize == 1) { // kVSize==1 if (idx < idx_max_v[i]) { if (mode == SoftmaxMode::kLogSoftmax) { // log softmax softmax[(first_batch + i) * stride + idx] = srcdata[i][it][0] - max_value[i] - sum[i]; // softmax with cross entropy hard label } else if (mode == SoftmaxMode::kCrossEntropy) { AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i]; // softmax softmax[(first_batch + i) * stride + idx] = ::exp(logsoftmax); // label int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize; if (IgnoreIndex == true) { // IgnoreIndex is true if (label[first_batch + i] == loss_idx) { if (label[first_batch + i] != ignore_index) { loss[first_batch + i] = -logsoftmax; } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // IgnoreIndex is false if (label[first_batch + i] >= 0 && label[first_batch + i] < element_count) { if (label[first_batch + i] == loss_idx) { loss[first_batch + i] = -logsoftmax; } } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // softmax softmax[(first_batch + i) * stride + idx] = srcdata[i][it][0] / sum[i]; } } else { break; } } else { // KVSize>1 VecT* softmax_v = reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]); VecT tmpdata; T* tmpptr = reinterpret_cast<T*>(&tmpdata); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax) { // log softmax tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i]; // softmax with cross entropy hard label } else if (mode == SoftmaxMode::kCrossEntropy) { AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i]; // softmax tmpptr[s] = ::exp(logsoftmax); // label int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s; if (IgnoreIndex == true) { // IgnoreIndex is true if (label[first_batch + i] == loss_idx && label[first_batch + i] != ignore_index) { loss[first_batch + i] = -logsoftmax; } } else { // IgnoreIndex is false if (label[first_batch + i] >= 0 && label[first_batch + i] < element_count) { if (label[first_batch + i] == loss_idx) { loss[first_batch + i] = -logsoftmax; } } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // softmax tmpptr[s] = srcdata[i][it][s] / sum[i]; } } if (idx < idx_max_v[i]) { softmax_v[idx] = tmpdata; } else { break; } } } } } #define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, VecT, AccT) \ case Log2Elements: \ hipLaunchKernelGGL(( WarpSoftmaxForward<T, VecT, AccT, Log2Elements, mode, \ IgnoreIndex>), dim3(blocks), dim3(threads), 0, stream, \ loss, softmax, src, label, batch_size, stride, element_count, \ ignore_index); \ break; /* Wrapper of softmax with cross entropy forward hard label. 
*/ template <typename T, SoftmaxMode mode, bool IgnoreIndex> void SwitchWarpSoftmaxForward(T* loss, T* softmax, const T* src, const int64_t* label, const int batch_size, const int stride, const int element_count, const int ignore_index, gpuStream_t stream) { using AccT = typename details::MPTypeTrait<T>::Type; // use 128 threads per block to maximimize gpu utilization const int log2_elements = static_cast<int>(Log2Ceil(element_count)); const int kDimCeil = 1 << log2_elements; int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; int batches_per_warp = (kDimCeil <= 128) ? 2 : 1; constexpr int threads_per_block = 128; int warps_per_block = (threads_per_block / kWarpSize); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (batch_size + batches_per_block - 1) / batches_per_block; dim3 threads(kWarpSize, warps_per_block, 1); switch (log2_elements) { SOFTMAX_WARP_FORWARD_CASE(0, T, AccT); SOFTMAX_WARP_FORWARD_CASE(1, T, AccT); SOFTMAX_WARP_FORWARD_CASE(2, T, AccT); SOFTMAX_WARP_FORWARD_CASE(3, T, AccT); SOFTMAX_WARP_FORWARD_CASE(4, T, AccT); SOFTMAX_WARP_FORWARD_CASE(5, T, AccT); SOFTMAX_WARP_FORWARD_CASE(6, T, AccT); SOFTMAX_WARP_FORWARD_CASE(7, T, AccT); SOFTMAX_WARP_FORWARD_CASE(8, T, AccT); SOFTMAX_WARP_FORWARD_CASE(9, T, AccT); default: break; } } /* Wrapper of softmax with cross entropy hard label. - SwitchWarpSoftmaxForward for small size - cudnn function for large size */ template <typename T, bool IgnoreIndex> static void SoftmaxWithCrossEntropyHardLabel( const platform::CUDADeviceContext& ctx, int rank, int axis, const T* logits_data, const int64_t* labels_data, T* loss_data, T* softmax_data, int N, int dim, int D, const int ignore_index) { auto stream = ctx.stream(); constexpr int max_dim = 320; if (D == 1 && dim <= max_dim) { // small size const SoftmaxMode mode = SoftmaxMode::kCrossEntropy; SwitchWarpSoftmaxForward<T, mode, IgnoreIndex>( loss_data, softmax_data, logits_data, labels_data, N, dim, dim, ignore_index, stream); } else { ScopedTensorDescriptor desc; std::vector<int> tensor_dims = {N, dim, D, 1}; DataLayout layout = DataLayout::kNCHW; #ifdef PADDLE_WITH_HIP miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #else cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #endif auto handle = ctx.cudnn_handle(); #ifdef PADDLE_WITH_HIP auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data, MIOPEN_SOFTMAX_LOG, mode)); #else auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data)); #endif int threads = 128; int blocks = (N * dim * D + threads - 1) / threads; // compute cross entropy, input is log softmax hipLaunchKernelGGL(( CrossEntropyExpHardLabel<T, IgnoreIndex>), dim3(blocks), dim3(threads), 0, stream, loss_data, softmax_data, labels_data, N, dim, D, ignore_index); } } /* Wrapper of softmax with cross entropy grad hard label. 
*/ template <typename T> __global__ void SoftmaxWithCrossEntropyGradHardLabel( T* logits_grad, const T* loss_grad, const int64_t* labels, const int64_t n, const int64_t dim, const int64_t d, const int ignore_index) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = idx / (d * dim); int64_t idx_dim = (idx / d) % dim; int64_t idx_d = idx % d; int64_t ids = idx_n * d + idx_d; if (idx < n * dim * d) { if (labels[ids] == ignore_index) { logits_grad[idx] = static_cast<T>(0.0); } else if (labels[ids] == idx_dim) { logits_grad[idx] = (logits_grad[idx] - static_cast<T>(1.0)) * loss_grad[ids]; } else { logits_grad[idx] *= loss_grad[ids]; } } } /* Cross entropy soft label with dynamic size on axis (log2_elements is varibale). - if the input is softmaxcompute loss with softmax - if the input is log_softmax, compute loss with log_softmax and update softmax */ template <typename T, typename VecT, bool InLogMode = false> __global__ void CrossEntropySoftLabel(T* loss, T* softmaxwrt, const T* softmax, const T* labels, const int n, const int dim, const int d, int log2_elements) { const int kDimCeil = 1 << log2_elements; const int kVSize = sizeof(VecT) / sizeof(T); #ifdef __HIPCC__ const int kThreadPerBlock = 256; #else const int kThreadPerBlock = 512; #endif const int kBatchPerBlock = 1; const int kWarpSize = 32; // (dim < 32) ? dim : 32; const int kBatchSize = 1; const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock; const int kWarpPerBatch = kThreadPerBatch / kWarpSize; const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch; const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; T sum[kBatchSize]{static_cast<T>(0.0)}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { int ids = first_batch + i; if (ids >= n * d) break; int idx_n = ids / d; int idx_d = ids % d; #pragma unroll for (int it = 0; it < kIterations; ++it) { int idx_dim = it * kThreadPerBatch + threadIdx.x; int idx = idx_n * dim * d + idx_dim * d + idx_d; if (idx_n < n && idx_dim < dim) { VecT softmaxdata; if (InLogMode) { softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0]; } else { softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0]; } VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0]; T* softmaxptr = reinterpret_cast<T*>(&softmaxdata); T* labelsptr = reinterpret_cast<T*>(&labelsdata); #pragma unroll for (int s = 0; s < kVSize; s++) { if (InLogMode) { sum[i] -= softmaxptr[s] * labelsptr[s]; softmaxptr[s] = Exp(softmaxptr[s]); } else { sum[i] -= Log(softmaxptr[s]) * labelsptr[s]; } } if (InLogMode) { reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata; } } } } WarpReduceSum<T, kBatchSize, kWarpSize>(sum); __syncthreads(); __shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize]; if (threadIdx.x % kWarpSize == 0) { #pragma unroll for (int i = 0; i < kBatchSize; i++) { sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i]; } } __syncthreads(); // write if (threadIdx.x == 0) { for (int i = 0; i < kBatchSize; i++) { int ids = first_batch + i; if (ids < n * d) { loss[ids] = sumshare[0][threadIdx.y][i]; for (int s = 1; s < kWarpPerBatch; s++) { loss[ids] += sumshare[s][threadIdx.y][i]; } } } } } /* Core function of softmax with cross entropy forward soft label. 
The computation includes - Compute maximum of batch: maxvalue_{i} = max_j src_{i,j} - Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} } - Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} - log(sum[i]))} One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize). For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle api to compute max (sum) in one warp. */ template <typename T, typename VecT, typename AccT, int Log2Elements> __global__ void WarpSoftmaxForwardSoftLabel(T* loss, T* softmax, const T* src, const T* label, const int batch_size, const int stride, const int element_count) { const bool LogMode = true; constexpr int kDimCeil = 1 << Log2Elements; constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; constexpr int kVSize = sizeof(VecT) / sizeof(T); constexpr int kIterations = kDimCeil / kWarpSize; constexpr int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1; int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; int local_batches = batch_size - first_batch; if (local_batches > kBatchSize) { local_batches = kBatchSize; } // read data from global memory VecT srcdata[kBatchSize][kIterationsV]; VecT labeldata[kBatchSize][kIterationsV]; for (int i = 0; i < kBatchSize; ++i) { const VecT* src_v = reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]); const VecT* label_v = reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]); // max index to read int idx_max = (i < local_batches) ? element_count : 0; int idx_max_v = idx_max / kVSize; // read data for (int it = 0; it < kIterationsV; ++it) { int src_idx = threadIdx.x + it * kWarpSize; if (src_idx < idx_max_v) { srcdata[i][it] = src_v[src_idx]; labeldata[i][it] = label_v[src_idx]; } else { #pragma unroll for (int s = 0; s < kVSize; s++) { reinterpret_cast<T*>(&srcdata[i][it])[s] = -std::numeric_limits<AccT>::max(); reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0; } } } } // compute max value AccT max_value[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { max_value[i] = -std::numeric_limits<AccT>::infinity(); #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]); T valmax = srcptr_v[0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s]; } max_value[i] = (max_value[i] > static_cast<AccT>(valmax)) ? max_value[i] : static_cast<AccT>(valmax); } } WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value); // compute sum AccT sum[kBatchSize]{0.0}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (LogMode) { sum[i] += ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]); } else { srcptr_v[s] = ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]); sum[i] += static_cast<AccT>(srcptr_v[s]); } } } } WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum); // log_softmax and loss AccT sumloss[kBatchSize]{0.0}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { if (i >= local_batches) break; VecT* softmax_v = reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]); // max index to write int idx_max = (i < local_batches) ? 
element_count : 0; int idx_max_v = idx_max / kVSize; if (LogMode) { sum[i] = ::log(sum[i]); } #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]); T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]); VecT tmpv; T* tmpvp = reinterpret_cast<T*>(&tmpv); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (LogMode) { AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i]; sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]); tmpvp[s] = ::exp(logsoftmax); } else { tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i]; } } int idx = threadIdx.x + it * kWarpSize; if (idx < idx_max_v) { softmax_v[idx] = tmpv; } } } // loss WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss); for (int i = 0; i < kBatchSize; i++) { if (i >= local_batches) break; loss[first_batch + i] = sumloss[i]; } } #define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT) \ case Log2Elements: \ hipLaunchKernelGGL(( WarpSoftmaxForwardSoftLabel<T, VecT, AccT, \ Log2Elements>), dim3(blocks), dim3(threads), 0, stream, \ loss, softmax, src, label, batch_size, stride, element_count); \ break; /* Wrapper of softmax with cross entropy forward soft label. */ template <typename T> void SwitchWarpSoftmaxForwardSoftLabel(const int blocks, const dim3 threads, gpuStream_t stream, T* loss, T* softmax, const T* src, const T* label, const int batch_size, const int stride, const int element_count, const int log2_elements) { using AccT = typename details::MPTypeTrait<T>::Type; switch (log2_elements) { SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT); default: break; } } template <typename T> static void SoftmaxWithCrossEntropySoftLabel( const platform::CUDADeviceContext& ctx, const int rank, const int axis, const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data, int N, int dim, int D) { #ifdef __HIPCC__ constexpr int kMaxBlockDim = 256; #else constexpr int kMaxBlockDim = 512; #endif int64_t block_dim = dim >= kMaxBlockDim ? kMaxBlockDim : (1 << static_cast<int>(std::log2(dim))); int64_t grid_dim = N * D; constexpr int max_dim = 320; const int kDimLog2 = static_cast<int>(Log2Ceil(dim)); const int kDimCeil = 1 << kDimLog2; auto stream = ctx.stream(); if (D == 1 && dim <= max_dim) { int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; int batches_per_warp = (kDimCeil <= 128) ? 
2 : 1; // use 128 threads per block to maximimize gpu utilization constexpr int threads_per_block = 128; int warps_per_block = (threads_per_block / kWarpSize); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (N + batches_per_block - 1) / batches_per_block; dim3 threads(kWarpSize, warps_per_block, 1); SwitchWarpSoftmaxForwardSoftLabel<T>(blocks, threads, stream, loss_data, softmax_data, logits_data, labels_data, N, dim, dim, kDimLog2); } else { ScopedTensorDescriptor desc; std::vector<int> tensor_dims = {N, dim, D, 1}; DataLayout layout = DataLayout::kNCHW; #ifdef PADDLE_WITH_HIP miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #else cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #endif auto handle = ctx.cudnn_handle(); #ifdef PADDLE_WITH_HIP auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data, MIOPEN_SOFTMAX_LOG, mode)); #else auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data)); #endif const int kDimLog2 = static_cast<int>(Log2Ceil(dim)); const int kDimCeil = 1 << kDimLog2; #ifdef __HIPCC__ int kThreadPerBlock = 256; #else int kThreadPerBlock = 512; #endif int kBatchPerBlock = 1; int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock; dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1); hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, true>), dim3(blocks), dim3(threads), 0, stream, loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2); } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int64_t n, const int64_t d, const int64_t remain) { int64_t ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int64_t idx_n = ids / d; int64_t idx_remain = ids % remain; int64_t idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]); } } template <typename T> __global__ void SoftLabelCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int n, const int d, const int remain) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int idx_n = ids / d; int idx_remain = ids % remain; int idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (-labels[ids] / logit_grad[ids]); } } template <typename T> __global__ void HardLabelCrossEntropyGradientKernel(T* logit_grad, const int64_t* labels, const int n, const int d, const int remain, const int ignore_index) { CUDA_KERNEL_LOOP(index, n * remain) { int idx_n = index / remain; int idx_remain = index % remain; int tmp = labels[index]; int idx = idx_n * d + tmp * remain + idx_remain; if (ignore_index != tmp) { logit_grad[idx] = -static_cast<T>(1.) 
/ logit_grad[idx]; } } } template <typename T> __global__ void ScaleCrossEntropyGradient(T* logit_grad, const T* loss_grad, const int num, const int d, const int remain, const int64_t* labels, const int ignore_index) { CUDA_KERNEL_LOOP(index, num) { int idx_n = index / d; int idx_remain = index % remain; int idx_lbl = idx_n * remain + idx_remain; int k = (index % d) / remain; if (labels[idx_lbl] == ignore_index || labels[idx_lbl] != k) { logit_grad[index] = static_cast<T>(0.); } else { logit_grad[index] *= loss_grad[idx_lbl]; } } } template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const bool use_softmax = context.Attr<bool>("use_softmax"); // do not with softmax op, and input is softmax if (!use_softmax) { const Tensor* softmax = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax_out = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = softmax->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); const int axis_dim = softmax->dims()[axis]; const int n = SizeToAxis(axis, softmax->dims()); const int d = SizeFromAxis(axis, softmax->dims()); auto* softmax_out_data = softmax_out->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); if (axis_dim == 1) { set_constant(context.cuda_device_context(), softmax_out, static_cast<T>(1)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); Tensor softmax_2d, labels_2d, loss_2d, softmax_out_2d; softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); softmax_out_2d.ShareDataWith(*softmax_out).Resize({n, d}); // math::CrossEntropyFunctor support axis is the last if (axis == -1) { math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, soft_label, ignore_index, axis_dim); return; } // if axis is not the last, we need a new impliment if (soft_label) { auto* logits_data = softmax->data<T>(); auto* labels_data = labels->data<T>(); const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim)); const int kDimCeil = 1 << kDimLog2; #ifdef __HIPCC__ int kThreadPerBlock = 256; #else int kThreadPerBlock = 512; #endif int kBatchPerBlock = 1; int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock; dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1); hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, false>), dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(), loss_data, NULL, logits_data, labels_data, n, axis_dim, d / axis_dim, kDimLog2); } else { // HardLabel auto* logits_data = softmax->data<T>(); auto* labels_data = labels->data<int64_t>(); int threads = 128; int blocks = (n * d / axis_dim + threads - 1) / threads; if (ignore_index >= 0 && ignore_index < axis_dim) { hipLaunchKernelGGL(( CrossEntropyHardLabel<T, true>), dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(), 
loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim, ignore_index); } else { hipLaunchKernelGGL(( CrossEntropyHardLabel<T, false>), dim3(blocks), dim3(threads), 0, context.cuda_device_context().stream(), loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim, ignore_index); } } // cause of input is softmax // copy to output softmax, directly framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), softmax_out); return; } const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = logits->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logits->dims()[axis]; const int64_t n = SizeToAxis(axis, logits->dims()); const int64_t d = SizeFromAxis(axis, logits->dims()); auto* softmax_data = softmax->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); if (axis_dim == 1) { math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), softmax, static_cast<T>(1)); set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); if (soft_label) { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<T>(); SoftmaxWithCrossEntropySoftLabel<T>( context.cuda_device_context(), rank, axis, logits_data, labels_data, softmax_data, loss_data, n, axis_dim, d / axis_dim); } else { if (!context.Attr<bool>("numeric_stable_mode")) { // CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim Tensor logits_2d, softmax_2d, labels_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({n, d}); softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(), &logits_2d, &softmax_2d); math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, false, ignore_index, axis_dim); } else { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<int64_t>(); if (ignore_index >= 0 && ignore_index < axis_dim) { SoftmaxWithCrossEntropyHardLabel<T, true>( context.cuda_device_context(), rank, axis, logits_data, labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim, ignore_index); } else { SoftmaxWithCrossEntropyHardLabel<T, false>( context.cuda_device_context(), rank, axis, logits_data, labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim, ignore_index); } } } } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), 
context.device_context(), logit_grad); } T* logit_grad_data = logit_grad->data<T>(); const int rank = logit_grad->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logit_grad->dims()[axis]; const int64_t n = SizeToAxis(axis, logit_grad->dims()); const int64_t d = SizeFromAxis(axis, logit_grad->dims()); const int64_t remain = d / axis_dim; #ifdef __HIPCC__ int block = 256; #else int block = 512; #endif auto stream = context.cuda_device_context().stream(); auto ignore_index = context.Attr<int>("ignore_index"); auto use_softmax = context.Attr<bool>("use_softmax"); // do not with softmax op, and input is softmax if (!use_softmax) { if (context.Attr<bool>("soft_label")) { int grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); hipLaunchKernelGGL(( SoftLabelCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { Tensor logits_grad_2d; logits_grad_2d.ShareDataWith(*logit_grad).Resize({n, d}); int grid = (n * remain + block - 1) / block; const int64_t* label_data = labels->data<int64_t>(); hipLaunchKernelGGL(( HardLabelCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, label_data, n, d, remain, ignore_index); int num = n * d; grid = (num + block - 1) / block; hipLaunchKernelGGL(( ScaleCrossEntropyGradient<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, num, d, remain, label_data, ignore_index); } return; } // with softmax, continue if (context.Attr<bool>("soft_label")) { int64_t grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { const int64_t* label_data = labels->data<int64_t>(); int grid = (n * d + block - 1) / block; hipLaunchKernelGGL(( SoftmaxWithCrossEntropyGradHardLabel<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, label_data, n, d / remain, remain, ignore_index); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>); #else REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>); #endif
2c94290119a416698908163409f50d9c93e685ca.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_cudnn_op.cu.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using DataLayout = platform::DataLayout; using Tensor = framework::Tensor; // Wrapper of log function. Use log(float32) for float16 template <typename T> static __device__ __forceinline__ T Log(T x) { using AccT = typename details::MPTypeTrait<T>::Type; AccT logx = std::log(static_cast<AccT>(x)); return math::TolerableValue<T>()(static_cast<T>(logx)); } // Wrapper of exp function. Use exp(float32) for float16 template <typename T> static __device__ __forceinline__ T Exp(T x) { using AccT = typename details::MPTypeTrait<T>::Type; AccT expx = std::exp(static_cast<AccT>(x)); return math::TolerableValue<T>()(static_cast<T>(expx)); } // log2(value) static inline int Log2Ceil(int value) { int log2_value = 0; while ((1 << log2_value) < value) ++log2_value; return log2_value; } enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy }; /* Hard label cross entropy. */ template <typename T, bool IgnoreIndex> __global__ void CrossEntropyHardLabel(T* loss, const T* softmax, const int64_t* labels, const int n, const int dim, const int d, const int ignore_idx) { int64_t ids = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = ids / d; int64_t idx_d = ids % d; // thread ids compute loss[ids] using softmax[idx] if (ids < n * d) { if (labels[ids] < 0) { // label is negative loss[ids] = static_cast<T>(0.0); } else { // label is positive of zero int64_t idx = idx_n * dim * d + labels[ids] * d + idx_d; if (IgnoreIndex == true) { // IgnoreIndex is true if (labels[ids] == ignore_idx) { loss[ids] = static_cast<T>(0.0); } else { loss[ids] = -Log(softmax[idx]); } } else { // IgnoreIndex is false loss[ids] = -Log(softmax[idx]); } } } } /* Hard label cross entropy with exp. 
Input: log softmax Output: loss and exp(input) */ template <typename T, bool IgnoreIndex> __global__ void CrossEntropyExpHardLabel(T* loss, T* softmax, const int64_t* labels, const int n, const int dim, const int d, const int ignore_idx) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = idx / (d * dim); int64_t idx_dim = (idx / d) % dim; int64_t idx_d = idx % d; int64_t ids = idx_n * d + idx_d; if (idx < n * dim * d) { if (IgnoreIndex == true) { // IgnoreIndex is true if (idx_dim == labels[ids]) { if (labels[ids] == ignore_idx) { loss[ids] = static_cast<T>(0.0); } else { loss[ids] = -softmax[idx]; } } } else { // IgnoreIndex is false if (labels[ids] >= 0 && labels[ids] < dim) { if (labels[ids] == idx_dim) { loss[ids] = -softmax[idx]; } } else { loss[ids] = static_cast<T>(0.0); } } softmax[idx] = Exp(softmax[idx]); } } /* Core function of softmax with cross entropy forward - softmax, SoftmaxMode=kSoftmax - log softmax, SoftmaxMode=kLogSoftmax - softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy The computation includes - Compute max value: maxvalue_{i} = max_j src_{i,j} - Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}} - Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i} - Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i}) - Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label) This computation results from following formula: softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}} = e^{src_{i,j} - maxvalue_{i}} / sum_{j}{e^{src_{i,j} - maxvalue_{i}}} = e^{src_{i,j} - maxvalue_{i}} / s_{i} logsoftmax_{i,j} = log(softmax_{i,j}) = src_{i,j} - maxvalue_{i} - log(s_{i}) One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize). For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle api to compute max (sum) in one warp. */ template <typename T, typename VecT, typename AccT, int Log2Elements, SoftmaxMode mode, bool IgnoreIndex> __global__ void WarpSoftmaxForward(T* loss, T* softmax, const T* src, const int64_t* label, const int batch_size, const int stride, const int element_count, const int ignore_index) { constexpr int kDimCeil = 1 << Log2Elements; constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; constexpr int kVSize = sizeof(VecT) / sizeof(T); constexpr int kIterations = kDimCeil / kWarpSize; constexpr int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1; int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; // max index to read int idx_max_v[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; i++) { int idx_max = ((i + first_batch) < batch_size) ? 
element_count : 0; idx_max_v[i] = idx_max / kVSize; } // read data from global memory AccT srcdata[kBatchSize][kIterationsV][kVSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // read data to srcdata: - KVSize==1, - KVSize>1 #pragma unroll for (int it = 0; it < kIterationsV; ++it) { int src_idx = threadIdx.x + it * kWarpSize; if (kVSize == 1) { if (src_idx < idx_max_v[i]) { srcdata[i][it][0] = static_cast<AccT>(src[(first_batch + i) * stride + src_idx]); } else { srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity(); } } else { const VecT* src_v = reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]); if (src_idx < idx_max_v[i]) { VecT srctmp = src_v[src_idx]; const T* srcinptr = reinterpret_cast<const T*>(&srctmp); #pragma unroll for (int s = 0; s < kVSize; s++) { srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]); } } else { #pragma unroll for (int s = 0; s < kVSize; s++) { srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity(); } } } } } // compute max value: maxvalue_{i} = max_j src_{i,j} AccT max_value[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // it = 0 AccT valmax = srcdata[i][0][0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s]; } max_value[i] = valmax; // it = 1, 2, ... #pragma unroll for (int it = 1; it < kIterationsV; ++it) { AccT valmax = srcdata[i][it][0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s]; } max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax; } } WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value); // compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} } AccT sum[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { // it = 0 if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] = std::exp(srcdata[i][0][0] - max_value[i]); } else { srcdata[i][0][0] = std::exp(srcdata[i][0][0] - max_value[i]); sum[i] = srcdata[i][0][0]; } #pragma unroll for (int s = 1; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] += std::exp(srcdata[i][0][s] - max_value[i]); } else { srcdata[i][0][s] = std::exp(srcdata[i][0][s] - max_value[i]); sum[i] += srcdata[i][0][s]; } } // it = 1, 2, ... 
#pragma unroll for (int it = 1; it < kIterationsV; ++it) { #pragma unroll for (int s = 0; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] += std::exp(srcdata[i][it][s] - max_value[i]); } else { srcdata[i][it][s] = std::exp(srcdata[i][it][s] - max_value[i]); sum[i] += srcdata[i][it][s]; } } } } WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum); // write data #pragma unroll for (int i = 0; i < kBatchSize; ++i) { if (mode == SoftmaxMode::kLogSoftmax || mode == SoftmaxMode::kCrossEntropy) { sum[i] = std::log(sum[i]); } #pragma unroll for (int it = 0; it < kIterationsV; ++it) { int idx = threadIdx.x + it * kWarpSize; if (kVSize == 1) { // kVSize==1 if (idx < idx_max_v[i]) { if (mode == SoftmaxMode::kLogSoftmax) { // log softmax softmax[(first_batch + i) * stride + idx] = srcdata[i][it][0] - max_value[i] - sum[i]; // softmax with cross entropy hard label } else if (mode == SoftmaxMode::kCrossEntropy) { AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i]; // softmax softmax[(first_batch + i) * stride + idx] = std::exp(logsoftmax); // label int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize; if (IgnoreIndex == true) { // IgnoreIndex is true if (label[first_batch + i] == loss_idx) { if (label[first_batch + i] != ignore_index) { loss[first_batch + i] = -logsoftmax; } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // IgnoreIndex is false if (label[first_batch + i] >= 0 && label[first_batch + i] < element_count) { if (label[first_batch + i] == loss_idx) { loss[first_batch + i] = -logsoftmax; } } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // softmax softmax[(first_batch + i) * stride + idx] = srcdata[i][it][0] / sum[i]; } } else { break; } } else { // KVSize>1 VecT* softmax_v = reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]); VecT tmpdata; T* tmpptr = reinterpret_cast<T*>(&tmpdata); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (mode == SoftmaxMode::kLogSoftmax) { // log softmax tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i]; // softmax with cross entropy hard label } else if (mode == SoftmaxMode::kCrossEntropy) { AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i]; // softmax tmpptr[s] = std::exp(logsoftmax); // label int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s; if (IgnoreIndex == true) { // IgnoreIndex is true if (label[first_batch + i] == loss_idx && label[first_batch + i] != ignore_index) { loss[first_batch + i] = -logsoftmax; } } else { // IgnoreIndex is false if (label[first_batch + i] >= 0 && label[first_batch + i] < element_count) { if (label[first_batch + i] == loss_idx) { loss[first_batch + i] = -logsoftmax; } } else { loss[first_batch + i] = static_cast<T>(0.0); } } } else { // softmax tmpptr[s] = srcdata[i][it][s] / sum[i]; } } if (idx < idx_max_v[i]) { softmax_v[idx] = tmpdata; } else { break; } } } } } #define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, VecT, AccT) \ case Log2Elements: \ WarpSoftmaxForward<T, VecT, AccT, Log2Elements, mode, \ IgnoreIndex><<<blocks, threads, 0, stream>>>( \ loss, softmax, src, label, batch_size, stride, element_count, \ ignore_index); \ break; /* Wrapper of softmax with cross entropy forward hard label. 
*/ template <typename T, SoftmaxMode mode, bool IgnoreIndex> void SwitchWarpSoftmaxForward(T* loss, T* softmax, const T* src, const int64_t* label, const int batch_size, const int stride, const int element_count, const int ignore_index, gpuStream_t stream) { using AccT = typename details::MPTypeTrait<T>::Type; // use 128 threads per block to maximimize gpu utilization const int log2_elements = static_cast<int>(Log2Ceil(element_count)); const int kDimCeil = 1 << log2_elements; int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; int batches_per_warp = (kDimCeil <= 128) ? 2 : 1; constexpr int threads_per_block = 128; int warps_per_block = (threads_per_block / kWarpSize); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (batch_size + batches_per_block - 1) / batches_per_block; dim3 threads(kWarpSize, warps_per_block, 1); switch (log2_elements) { SOFTMAX_WARP_FORWARD_CASE(0, T, AccT); SOFTMAX_WARP_FORWARD_CASE(1, T, AccT); SOFTMAX_WARP_FORWARD_CASE(2, T, AccT); SOFTMAX_WARP_FORWARD_CASE(3, T, AccT); SOFTMAX_WARP_FORWARD_CASE(4, T, AccT); SOFTMAX_WARP_FORWARD_CASE(5, T, AccT); SOFTMAX_WARP_FORWARD_CASE(6, T, AccT); SOFTMAX_WARP_FORWARD_CASE(7, T, AccT); SOFTMAX_WARP_FORWARD_CASE(8, T, AccT); SOFTMAX_WARP_FORWARD_CASE(9, T, AccT); default: break; } } /* Wrapper of softmax with cross entropy hard label. - SwitchWarpSoftmaxForward for small size - cudnn function for large size */ template <typename T, bool IgnoreIndex> static void SoftmaxWithCrossEntropyHardLabel( const platform::CUDADeviceContext& ctx, int rank, int axis, const T* logits_data, const int64_t* labels_data, T* loss_data, T* softmax_data, int N, int dim, int D, const int ignore_index) { auto stream = ctx.stream(); constexpr int max_dim = 320; if (D == 1 && dim <= max_dim) { // small size const SoftmaxMode mode = SoftmaxMode::kCrossEntropy; SwitchWarpSoftmaxForward<T, mode, IgnoreIndex>( loss_data, softmax_data, logits_data, labels_data, N, dim, dim, ignore_index, stream); } else { ScopedTensorDescriptor desc; std::vector<int> tensor_dims = {N, dim, D, 1}; DataLayout layout = DataLayout::kNCHW; #ifdef PADDLE_WITH_HIP miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #else cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #endif auto handle = ctx.cudnn_handle(); #ifdef PADDLE_WITH_HIP auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data, MIOPEN_SOFTMAX_LOG, mode)); #else auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data)); #endif int threads = 128; int blocks = (N * dim * D + threads - 1) / threads; // compute cross entropy, input is log softmax CrossEntropyExpHardLabel<T, IgnoreIndex><<<blocks, threads, 0, stream>>>( loss_data, softmax_data, labels_data, N, dim, D, ignore_index); } } /* Wrapper of softmax with cross entropy grad hard label. 
*/ template <typename T> __global__ void SoftmaxWithCrossEntropyGradHardLabel( T* logits_grad, const T* loss_grad, const int64_t* labels, const int64_t n, const int64_t dim, const int64_t d, const int ignore_index) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t idx_n = idx / (d * dim); int64_t idx_dim = (idx / d) % dim; int64_t idx_d = idx % d; int64_t ids = idx_n * d + idx_d; if (idx < n * dim * d) { if (labels[ids] == ignore_index) { logits_grad[idx] = static_cast<T>(0.0); } else if (labels[ids] == idx_dim) { logits_grad[idx] = (logits_grad[idx] - static_cast<T>(1.0)) * loss_grad[ids]; } else { logits_grad[idx] *= loss_grad[ids]; } } } /* Cross entropy soft label with dynamic size on axis (log2_elements is varibale). - if the input is softmax,compute loss with softmax - if the input is log_softmax, compute loss with log_softmax and update softmax */ template <typename T, typename VecT, bool InLogMode = false> __global__ void CrossEntropySoftLabel(T* loss, T* softmaxwrt, const T* softmax, const T* labels, const int n, const int dim, const int d, int log2_elements) { const int kDimCeil = 1 << log2_elements; const int kVSize = sizeof(VecT) / sizeof(T); #ifdef __HIPCC__ const int kThreadPerBlock = 256; #else const int kThreadPerBlock = 512; #endif const int kBatchPerBlock = 1; const int kWarpSize = 32; // (dim < 32) ? dim : 32; const int kBatchSize = 1; const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock; const int kWarpPerBatch = kThreadPerBatch / kWarpSize; const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch; const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; T sum[kBatchSize]{static_cast<T>(0.0)}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { int ids = first_batch + i; if (ids >= n * d) break; int idx_n = ids / d; int idx_d = ids % d; #pragma unroll for (int it = 0; it < kIterations; ++it) { int idx_dim = it * kThreadPerBatch + threadIdx.x; int idx = idx_n * dim * d + idx_dim * d + idx_d; if (idx_n < n && idx_dim < dim) { VecT softmaxdata; if (InLogMode) { softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0]; } else { softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0]; } VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0]; T* softmaxptr = reinterpret_cast<T*>(&softmaxdata); T* labelsptr = reinterpret_cast<T*>(&labelsdata); #pragma unroll for (int s = 0; s < kVSize; s++) { if (InLogMode) { sum[i] -= softmaxptr[s] * labelsptr[s]; softmaxptr[s] = Exp(softmaxptr[s]); } else { sum[i] -= Log(softmaxptr[s]) * labelsptr[s]; } } if (InLogMode) { reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata; } } } } WarpReduceSum<T, kBatchSize, kWarpSize>(sum); __syncthreads(); __shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize]; if (threadIdx.x % kWarpSize == 0) { #pragma unroll for (int i = 0; i < kBatchSize; i++) { sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i]; } } __syncthreads(); // write if (threadIdx.x == 0) { for (int i = 0; i < kBatchSize; i++) { int ids = first_batch + i; if (ids < n * d) { loss[ids] = sumshare[0][threadIdx.y][i]; for (int s = 1; s < kWarpPerBatch; s++) { loss[ids] += sumshare[s][threadIdx.y][i]; } } } } } /* Core function of softmax with cross entropy forward soft label. 
The computation includes - Compute maximum of batch: maxvalue_{i} = max_j src_{i,j} - Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} } - Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} - log(sum[i]))} One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize). For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle api to compute max (sum) in one warp. */ template <typename T, typename VecT, typename AccT, int Log2Elements> __global__ void WarpSoftmaxForwardSoftLabel(T* loss, T* softmax, const T* src, const T* label, const int batch_size, const int stride, const int element_count) { const bool LogMode = true; constexpr int kDimCeil = 1 << Log2Elements; constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; constexpr int kVSize = sizeof(VecT) / sizeof(T); constexpr int kIterations = kDimCeil / kWarpSize; constexpr int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1; constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1; int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize; int local_batches = batch_size - first_batch; if (local_batches > kBatchSize) { local_batches = kBatchSize; } // read data from global memory VecT srcdata[kBatchSize][kIterationsV]; VecT labeldata[kBatchSize][kIterationsV]; for (int i = 0; i < kBatchSize; ++i) { const VecT* src_v = reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]); const VecT* label_v = reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]); // max index to read int idx_max = (i < local_batches) ? element_count : 0; int idx_max_v = idx_max / kVSize; // read data for (int it = 0; it < kIterationsV; ++it) { int src_idx = threadIdx.x + it * kWarpSize; if (src_idx < idx_max_v) { srcdata[i][it] = src_v[src_idx]; labeldata[i][it] = label_v[src_idx]; } else { #pragma unroll for (int s = 0; s < kVSize; s++) { reinterpret_cast<T*>(&srcdata[i][it])[s] = -std::numeric_limits<AccT>::max(); reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0; } } } } // compute max value AccT max_value[kBatchSize]; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { max_value[i] = -std::numeric_limits<AccT>::infinity(); #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]); T valmax = srcptr_v[0]; #pragma unroll for (int s = 1; s < kVSize; ++s) { valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s]; } max_value[i] = (max_value[i] > static_cast<AccT>(valmax)) ? max_value[i] : static_cast<AccT>(valmax); } } WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value); // compute sum AccT sum[kBatchSize]{0.0}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (LogMode) { sum[i] += std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]); } else { srcptr_v[s] = std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]); sum[i] += static_cast<AccT>(srcptr_v[s]); } } } } WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum); // log_softmax and loss AccT sumloss[kBatchSize]{0.0}; #pragma unroll for (int i = 0; i < kBatchSize; ++i) { if (i >= local_batches) break; VecT* softmax_v = reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]); // max index to write int idx_max = (i < local_batches) ? 
element_count : 0; int idx_max_v = idx_max / kVSize; if (LogMode) { sum[i] = std::log(sum[i]); } #pragma unroll for (int it = 0; it < kIterationsV; ++it) { T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]); T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]); VecT tmpv; T* tmpvp = reinterpret_cast<T*>(&tmpv); #pragma unroll for (int s = 0; s < kVSize; ++s) { if (LogMode) { AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i]; sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]); tmpvp[s] = std::exp(logsoftmax); } else { tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i]; } } int idx = threadIdx.x + it * kWarpSize; if (idx < idx_max_v) { softmax_v[idx] = tmpv; } } } // loss WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss); for (int i = 0; i < kBatchSize; i++) { if (i >= local_batches) break; loss[first_batch + i] = sumloss[i]; } } #define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT) \ case Log2Elements: \ WarpSoftmaxForwardSoftLabel<T, VecT, AccT, \ Log2Elements><<<blocks, threads, 0, stream>>>( \ loss, softmax, src, label, batch_size, stride, element_count); \ break; /* Wrapper of softmax with cross entropy forward soft label. */ template <typename T> void SwitchWarpSoftmaxForwardSoftLabel(const int blocks, const dim3 threads, gpuStream_t stream, T* loss, T* softmax, const T* src, const T* label, const int batch_size, const int stride, const int element_count, const int log2_elements) { using AccT = typename details::MPTypeTrait<T>::Type; switch (log2_elements) { SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT); SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT); default: break; } } template <typename T> static void SoftmaxWithCrossEntropySoftLabel( const platform::CUDADeviceContext& ctx, const int rank, const int axis, const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data, int N, int dim, int D) { #ifdef __HIPCC__ constexpr int kMaxBlockDim = 256; #else constexpr int kMaxBlockDim = 512; #endif int64_t block_dim = dim >= kMaxBlockDim ? kMaxBlockDim : (1 << static_cast<int>(std::log2(dim))); int64_t grid_dim = N * D; constexpr int max_dim = 320; const int kDimLog2 = static_cast<int>(Log2Ceil(dim)); const int kDimCeil = 1 << kDimLog2; auto stream = ctx.stream(); if (D == 1 && dim <= max_dim) { int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32; int batches_per_warp = (kDimCeil <= 128) ? 
2 : 1; // use 128 threads per block to maximimize gpu utilization constexpr int threads_per_block = 128; int warps_per_block = (threads_per_block / kWarpSize); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (N + batches_per_block - 1) / batches_per_block; dim3 threads(kWarpSize, warps_per_block, 1); SwitchWarpSoftmaxForwardSoftLabel<T>(blocks, threads, stream, loss_data, softmax_data, logits_data, labels_data, N, dim, dim, kDimLog2); } else { ScopedTensorDescriptor desc; std::vector<int> tensor_dims = {N, dim, D, 1}; DataLayout layout = DataLayout::kNCHW; #ifdef PADDLE_WITH_HIP miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #else cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims); #endif auto handle = ctx.cudnn_handle(); #ifdef PADDLE_WITH_HIP auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE : MIOPEN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSoftmaxForward_V2( handle, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data, MIOPEN_SOFTMAX_LOG, mode)); #else auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE : CUDNN_SOFTMAX_MODE_CHANNEL; PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSoftmaxForward( handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(), descp, logits_data, platform::CudnnDataType<T>::kZero(), descp, softmax_data)); #endif const int kDimLog2 = static_cast<int>(Log2Ceil(dim)); const int kDimCeil = 1 << kDimLog2; #ifdef __HIPCC__ int kThreadPerBlock = 256; #else int kThreadPerBlock = 512; #endif int kBatchPerBlock = 1; int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock; dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1); CrossEntropySoftLabel<T, T, true><<<blocks, threads, 0, stream>>>( loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2); } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int64_t n, const int64_t d, const int64_t remain) { int64_t ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int64_t idx_n = ids / d; int64_t idx_remain = ids % remain; int64_t idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]); } } template <typename T> __global__ void SoftLabelCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int n, const int d, const int remain) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int idx_n = ids / d; int idx_remain = ids % remain; int idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (-labels[ids] / logit_grad[ids]); } } template <typename T> __global__ void HardLabelCrossEntropyGradientKernel(T* logit_grad, const int64_t* labels, const int n, const int d, const int remain, const int ignore_index) { CUDA_KERNEL_LOOP(index, n * remain) { int idx_n = index / remain; int idx_remain = index % remain; int tmp = labels[index]; int idx = idx_n * d + tmp * remain + idx_remain; if (ignore_index != tmp) { logit_grad[idx] = -static_cast<T>(1.) 
/ logit_grad[idx]; } } } template <typename T> __global__ void ScaleCrossEntropyGradient(T* logit_grad, const T* loss_grad, const int num, const int d, const int remain, const int64_t* labels, const int ignore_index) { CUDA_KERNEL_LOOP(index, num) { int idx_n = index / d; int idx_remain = index % remain; int idx_lbl = idx_n * remain + idx_remain; int k = (index % d) / remain; if (labels[idx_lbl] == ignore_index || labels[idx_lbl] != k) { logit_grad[index] = static_cast<T>(0.); } else { logit_grad[index] *= loss_grad[idx_lbl]; } } } template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const bool use_softmax = context.Attr<bool>("use_softmax"); // do not with softmax op, and input is softmax if (!use_softmax) { const Tensor* softmax = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax_out = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = softmax->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); const int axis_dim = softmax->dims()[axis]; const int n = SizeToAxis(axis, softmax->dims()); const int d = SizeFromAxis(axis, softmax->dims()); auto* softmax_out_data = softmax_out->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); if (axis_dim == 1) { set_constant(context.cuda_device_context(), softmax_out, static_cast<T>(1)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); Tensor softmax_2d, labels_2d, loss_2d, softmax_out_2d; softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); softmax_out_2d.ShareDataWith(*softmax_out).Resize({n, d}); // math::CrossEntropyFunctor support axis is the last if (axis == -1) { math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, soft_label, ignore_index, axis_dim); return; } // if axis is not the last, we need a new impliment if (soft_label) { auto* logits_data = softmax->data<T>(); auto* labels_data = labels->data<T>(); const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim)); const int kDimCeil = 1 << kDimLog2; #ifdef __HIPCC__ int kThreadPerBlock = 256; #else int kThreadPerBlock = 512; #endif int kBatchPerBlock = 1; int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock; dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1); CrossEntropySoftLabel<T, T, false><<< blocks, threads, 0, context.cuda_device_context().stream()>>>( loss_data, NULL, logits_data, labels_data, n, axis_dim, d / axis_dim, kDimLog2); } else { // HardLabel auto* logits_data = softmax->data<T>(); auto* labels_data = labels->data<int64_t>(); int threads = 128; int blocks = (n * d / axis_dim + threads - 1) / threads; if (ignore_index >= 0 && ignore_index < axis_dim) { CrossEntropyHardLabel<T, true><<< blocks, threads, 0, context.cuda_device_context().stream()>>>( loss_data, logits_data, labels_data, n, axis_dim, d / 
axis_dim, ignore_index); } else { CrossEntropyHardLabel<T, false><<< blocks, threads, 0, context.cuda_device_context().stream()>>>( loss_data, logits_data, labels_data, n, axis_dim, d / axis_dim, ignore_index); } } // cause of input is softmax // copy to output softmax, directly framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), softmax_out); return; } const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = logits->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logits->dims()[axis]; const int64_t n = SizeToAxis(axis, logits->dims()); const int64_t d = SizeFromAxis(axis, logits->dims()); auto* softmax_data = softmax->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); if (axis_dim == 1) { math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), softmax, static_cast<T>(1)); set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); if (soft_label) { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<T>(); SoftmaxWithCrossEntropySoftLabel<T>( context.cuda_device_context(), rank, axis, logits_data, labels_data, softmax_data, loss_data, n, axis_dim, d / axis_dim); } else { if (!context.Attr<bool>("numeric_stable_mode")) { // CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim Tensor logits_2d, softmax_2d, labels_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({n, d}); softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(), &logits_2d, &softmax_2d); math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, false, ignore_index, axis_dim); } else { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<int64_t>(); if (ignore_index >= 0 && ignore_index < axis_dim) { SoftmaxWithCrossEntropyHardLabel<T, true>( context.cuda_device_context(), rank, axis, logits_data, labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim, ignore_index); } else { SoftmaxWithCrossEntropyHardLabel<T, false>( context.cuda_device_context(), rank, axis, logits_data, labels_data, loss_data, softmax_data, n, axis_dim, d / axis_dim, ignore_index); } } } } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } T* logit_grad_data = logit_grad->data<T>(); const 
int rank = logit_grad->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logit_grad->dims()[axis]; const int64_t n = SizeToAxis(axis, logit_grad->dims()); const int64_t d = SizeFromAxis(axis, logit_grad->dims()); const int64_t remain = d / axis_dim; #ifdef __HIPCC__ int block = 256; #else int block = 512; #endif auto stream = context.cuda_device_context().stream(); auto ignore_index = context.Attr<int>("ignore_index"); auto use_softmax = context.Attr<bool>("use_softmax"); // do not with softmax op, and input is softmax if (!use_softmax) { if (context.Attr<bool>("soft_label")) { int grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); SoftLabelCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>( logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { Tensor logits_grad_2d; logits_grad_2d.ShareDataWith(*logit_grad).Resize({n, d}); int grid = (n * remain + block - 1) / block; const int64_t* label_data = labels->data<int64_t>(); HardLabelCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>( logit_grad_data, label_data, n, d, remain, ignore_index); int num = n * d; grid = (num + block - 1) / block; ScaleCrossEntropyGradient<T><<<grid, block, 0, stream>>>( logit_grad_data, loss_grad_data, num, d, remain, label_data, ignore_index); } return; } // with softmax, continue if (context.Attr<bool>("soft_label")) { int64_t grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); SoftCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>( logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { const int64_t* label_data = labels->data<int64_t>(); int grid = (n * d + block - 1) / block; SoftmaxWithCrossEntropyGradHardLabel<T><<<grid, block, 0, stream>>>( logit_grad_data, loss_grad_data, label_data, n, d / remain, remain, ignore_index); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>); #else REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>); #endif
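The hard-label path in the file above boils down to two formulas: forward loss_i = -log(softmax_i[label_i]) (zero when label_i == ignore_index), and backward d_logits = (softmax - one_hot(label)) * d_loss, which is what SoftmaxWithCrossEntropyGradHardLabel applies element-wise. A minimal single-row host-side sketch of that math, for reference only — it is not part of the Paddle operator, and the function and variable names here are made up:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Forward + backward of softmax cross entropy for one row with a hard label.
// Returns the loss; fills `softmax` and `logits_grad` (both resized to dim).
// Rows whose label equals ignore_index contribute zero loss and zero gradient,
// matching the ignore_index branches in the kernels above.
float softmax_xent_row(const std::vector<float>& logits, int64_t label,
                       int ignore_index, float loss_grad,
                       std::vector<float>* softmax,
                       std::vector<float>* logits_grad) {
  const int dim = static_cast<int>(logits.size());
  softmax->assign(dim, 0.f);
  logits_grad->assign(dim, 0.f);

  // Subtract the row max before exponentiating, as the warp kernels do.
  const float max_v = *std::max_element(logits.begin(), logits.end());
  float sum = 0.f;
  for (int j = 0; j < dim; ++j) {
    (*softmax)[j] = std::exp(logits[j] - max_v);
    sum += (*softmax)[j];
  }
  for (int j = 0; j < dim; ++j) (*softmax)[j] /= sum;

  if (label == ignore_index) return 0.f;  // gradient stays zero

  // backward: (softmax - one_hot(label)) * d_loss
  for (int j = 0; j < dim; ++j) {
    const float one_hot = (j == label) ? 1.f : 0.f;
    (*logits_grad)[j] = ((*softmax)[j] - one_hot) * loss_grad;
  }
  return -std::log((*softmax)[label]);  // forward loss
}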
ad0b957b56fa0efc8728ed8e55c8d82b3e51392c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "raft_backprojection_gpu_function.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define PI 3.141592653589793238462643383279502884 #define INIC -1.0 /* The TPBXb and TPBYb are parameters that can be changed and may interfere the performance */ #define TPBXb 32 #define TPBYb 32 /* Autor: Joao Carlos Cerqueira email: [email protected] */ texture<float, hipTextureType2D, hipReadModeElementType> texSino; extern "C" { __global__ void raft_backprojection_gpu_kernel(float *image, int wdI, int nrays, int nangles, float delta, float dt, float dth) { int i, j, T; float t, cs1, cs2, cs3, cs4, k; float x, y; float cosk, sink; i = 2*(blockDim.x * blockIdx.x + threadIdx.x); j = 2*(blockDim.y * blockIdx.y + threadIdx.y); if ( ((i+1)<wdI) && ((j+1) < wdI) ){ cs1 = 0; cs2 = 0; cs3 = 0; cs4 = 0; for(k=0; k < (nangles); k++) { sincosf(k * dth, &sink, &cosk); /////////////////////////// x = (float)INIC + i * delta; y = (float)INIC + j * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs1 = cs1 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + (i+1) * delta; y = (float)INIC + j * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs2 = cs2 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + i * delta; y = (float)INIC + (j+1) * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs3 = cs3 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + (i+1) * delta; y = (float)INIC + (j+1) * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs4 = cs4 + tex2D(texSino, k + 0.5f, T + 0.5f); } image[(j)*wdI + (wdI-1-i)] = (cs1*dth); image[(j)*wdI + (wdI-1-i-1)] = (cs2*dth); image[(j+1)*wdI + (wdI-1-i)] = (cs3*dth); image[(j+1)*wdI + (wdI-1-i-1)] = (cs4*dth); } } } extern "C" { void raft_backprojection_gpu_function(float *d_output, float *d_input, int sizeImage, int nrays, int nangles){ float dt = 2.0/(nrays-1); float dth = PI/(nangles); float delta = (float) 2*fabsf(INIC)/(sizeImage-1); // Allocate CUDA array in device memory (sinogram matrix) hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0,hipChannelFormatKindFloat); hipArray* cuArray; hipMallocArray(&cuArray, &channelDesc, nangles, nrays); // Copy to device memory the sinogram matrix hipMemcpyToArray(cuArray, 0, 0, d_input, nrays * nangles * sizeof(float) , hipMemcpyDeviceToDevice); // Set texture parameters texSino.addressMode[0] = hipAddressModeBorder; texSino.addressMode[1] = hipAddressModeBorder; texSino.filterMode = hipFilterModeLinear; /*texSino.normalized = true; */ // Bind the array to the texture reference hipBindTextureToArray(texSino, cuArray, channelDesc); //GRID and BLOCKS SIZE dim3 threadsPerBlock(TPBXb,TPBYb); dim3 grid((sizeImage/threadsPerBlock.x)/2 + 1, (sizeImage/threadsPerBlock.y)/2 + 1); //KERNEL EXECUTION hipLaunchKernelGGL(( raft_backprojection_gpu_kernel), dim3(grid), dim3(threadsPerBlock), 0, 0, d_output, sizeImage, nrays, nangles, delta, dt, dth); hipDeviceSynchronize(); hipUnbindTexture(texSino); hipFreeArray(cuArray); return; } }
ad0b957b56fa0efc8728ed8e55c8d82b3e51392c.cu
#include "raft_backprojection_gpu_function.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define PI 3.141592653589793238462643383279502884 #define INIC -1.0 /* The TPBXb and TPBYb are parameters that can be changed and may interfere the performance */ #define TPBXb 32 #define TPBYb 32 /* Autor: Joao Carlos Cerqueira email: [email protected] */ texture<float, cudaTextureType2D, cudaReadModeElementType> texSino; extern "C" { __global__ void raft_backprojection_gpu_kernel(float *image, int wdI, int nrays, int nangles, float delta, float dt, float dth) { int i, j, T; float t, cs1, cs2, cs3, cs4, k; float x, y; float cosk, sink; i = 2*(blockDim.x * blockIdx.x + threadIdx.x); j = 2*(blockDim.y * blockIdx.y + threadIdx.y); if ( ((i+1)<wdI) && ((j+1) < wdI) ){ cs1 = 0; cs2 = 0; cs3 = 0; cs4 = 0; for(k=0; k < (nangles); k++) { sincosf(k * dth, &sink, &cosk); /////////////////////////// x = (float)INIC + i * delta; y = (float)INIC + j * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs1 = cs1 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + (i+1) * delta; y = (float)INIC + j * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs2 = cs2 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + i * delta; y = (float)INIC + (j+1) * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs3 = cs3 + tex2D(texSino, k + 0.5f, T + 0.5f); ////////////////////////// x = (float)INIC + (i+1) * delta; y = (float)INIC + (j+1) * delta; t = x*cosk + y*sink; T = (float)((t + 1)/dt); cs4 = cs4 + tex2D(texSino, k + 0.5f, T + 0.5f); } image[(j)*wdI + (wdI-1-i)] = (cs1*dth); image[(j)*wdI + (wdI-1-i-1)] = (cs2*dth); image[(j+1)*wdI + (wdI-1-i)] = (cs3*dth); image[(j+1)*wdI + (wdI-1-i-1)] = (cs4*dth); } } } extern "C" { void raft_backprojection_gpu_function(float *d_output, float *d_input, int sizeImage, int nrays, int nangles){ float dt = 2.0/(nrays-1); float dth = PI/(nangles); float delta = (float) 2*fabsf(INIC)/(sizeImage-1); // Allocate CUDA array in device memory (sinogram matrix) cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0,cudaChannelFormatKindFloat); cudaArray* cuArray; cudaMallocArray(&cuArray, &channelDesc, nangles, nrays); // Copy to device memory the sinogram matrix cudaMemcpyToArray(cuArray, 0, 0, d_input, nrays * nangles * sizeof(float) , cudaMemcpyDeviceToDevice); // Set texture parameters texSino.addressMode[0] = cudaAddressModeBorder; texSino.addressMode[1] = cudaAddressModeBorder; texSino.filterMode = cudaFilterModeLinear; /*texSino.normalized = true; */ // Bind the array to the texture reference cudaBindTextureToArray(texSino, cuArray, channelDesc); //GRID and BLOCKS SIZE dim3 threadsPerBlock(TPBXb,TPBYb); dim3 grid((sizeImage/threadsPerBlock.x)/2 + 1, (sizeImage/threadsPerBlock.y)/2 + 1); //KERNEL EXECUTION raft_backprojection_gpu_kernel<<<grid, threadsPerBlock>>>(d_output, sizeImage, nrays, nangles, delta, dt, dth); cudaDeviceSynchronize(); cudaUnbindTexture(texSino); cudaFreeArray(cuArray); return; } }
d0c2857e4e99f988d0268ce5bbe04c10b1c786f4.hip
// !!! This is a file automatically generated by hipify!!! #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.h" #include "cudf/functions.h" #include "bitmask/bitmask_ops.h" #include "rmm/thrust_rmm_allocator.h" #include <hip/hip_runtime.h> #include <vector> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> template <typename LeftType,typename RightType,typename ResultType > struct gdf_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x == y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_not_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x != y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_greater_than_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x > y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_greater_than_or_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x >= y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_less_than_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x > y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_less_than_or_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x >= y; } }; /** * @brief takes two columns data and their valid bitmasks and performs a comparison operation returning a column of type bool * * Takes two thrust::iterator_adaptor implemented iterators and performs a filter operation on them that it outputs into a third thust::iterator_adaptor dervied iterator. 
* We are not making assumptions about what kind of data is being passed into it for these pointers so * * @param begin_left an iterator that implements thrust::iterator_adaptor * @param begin_right an iterator that implements thrust::iterator_adaptor * @param result an iterator that implements thrust::iterator_adaptor * @param operation an enum telling us what kind of comparision operation we are trying to do * @param num_values the number of rows in our columns * @param valid_left left column null bitmask (1 = not null) * @param valid_right right column null bitmask * @param valid_out output column null bitmask * @param left_null_count tells us if there are any nulls in the left column * @param right_null_count tells us if there are any nulls in the right column * @param */ template<typename IteratorTypeLeft, typename IteratorTypeRight, typename IteratorTypeResult, class LeftType = typename IteratorTypeLeft::value_type, class RightType = typename IteratorTypeRight::value_type, class ResultType = typename IteratorTypeResult::value_type> void gpu_filter_op(IteratorTypeLeft begin_left, IteratorTypeRight begin_right, IteratorTypeResult result, gdf_comparison_operator operation, gdf_size_type num_values, gdf_valid_type * valid_left, gdf_valid_type * valid_right, gdf_valid_type * valid_out, gdf_size_type left_null_count, gdf_size_type right_null_count, gdf_size_type & out_null_count, hipStream_t stream){ //TODO: be able to pass in custom comparison operators so we can handle types that have not implemented these oeprators IteratorTypeLeft end_left = begin_left + num_values; //regardless of nulls we perform the same operation //the nulls we are going to and together if (operation == GDF_EQUALS) { gdf_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_NOT_EQUALS) { gdf_not_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_GREATER_THAN_OR_EQUALS) { gdf_greater_than_or_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_GREATER_THAN) { gdf_greater_than_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_LESS_THAN) { gdf_less_than_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_LESS_THAN_OR_EQUALS) { gdf_less_than_or_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream),begin_left, end_left, begin_right, result, op); } //TODO: if we could make sure that these things aligned on 8 byte boundaries we could probable do this more efficiently as an unsigned long long if((left_null_count == 0) && (right_null_count == 0) ){ gdf_error error = all_bitmask_on(valid_out, out_null_count,num_values,stream); }else if(valid_right == valid_left){ //this is often the case if we are passing in the same column to operate on itself //or when we are sending something like a constant_iterator for the right hand side, allows us some shortcuts gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE ); hipError_t error = 
hipMemcpyAsync(valid_out,valid_left,num_chars_bitmask * sizeof(gdf_valid_type),hipMemcpyDeviceToDevice,stream); out_null_count = left_null_count; }else{ apply_bitmask_to_bitmask( out_null_count, valid_out, valid_left, valid_right, stream, num_values); } hipStreamSynchronize(stream); } // stencil: plantilla! // template<typename T> gdf_error gpu_comparison_static_templated(gdf_column *lhs, T value, gdf_column *output,gdf_comparison_operator operation){ GDF_REQUIRE(lhs->size == output->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(output->dtype == GDF_INT8, GDF_COLUMN_SIZE_MISMATCH); hipStream_t stream; hipStreamCreate(&stream); if(lhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> left_ptr((int8_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> left_ptr((int16_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> left_ptr((int32_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> left_ptr((int64_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> left_ptr((float *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> left_ptr((double *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); } hipStreamSynchronize(stream); hipStreamDestroy(stream); return GDF_SUCCESS; } gdf_error gpu_comparison_static_i8(gdf_column *lhs, int8_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i16(gdf_column *lhs, int16_t value, gdf_column *output,gdf_comparison_operator operation){ return 
gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i32(gdf_column *lhs, int32_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i64(gdf_column *lhs, int64_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_f32(gdf_column *lhs, float value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_f64(gdf_column *lhs, double value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison(gdf_column *lhs, gdf_column *rhs, gdf_column *output,gdf_comparison_operator operation){ GDF_REQUIRE(lhs->size == rhs->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(lhs->size == output->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(output->dtype == GDF_INT8, GDF_COLUMN_SIZE_MISMATCH); //TODO: consider adding more requirements like that the columns be well defined in their type //I commented this out because I am not sure if we want to require the output be an int8 //GDF_REQUIRE(output->dtype == GDF_INT8,GDF_UNSUPPORTED_DTYPE); // SO... I know the follow code looks, questionable, but the other option is to have a shitload of function definitions // given that our gdf_columns very conveniently carry around their types with them, this seems to be to be simpler // than having tons of function definitions. it also makes it so much nicer to just type gpu_filter(lhs,rhs,output); // also we are making it so that we can send any types here, the only one which is debatable I feel is output which // we could decide to always have be an int8 since the output is a boolean hipStream_t stream; hipStreamCreate(&stream); if(lhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> left_ptr((int8_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); //... 
}else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> left_ptr((int16_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); 
}else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> left_ptr((int32_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else 
if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> left_ptr((int64_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> left_ptr((float *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), 
thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> left_ptr((double *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), 
thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } } hipStreamSynchronize(stream); hipStreamDestroy(stream); return GDF_SUCCESS; }
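The gpu_comparison_static_* entry points above compare every element of a column against a single scalar and write an int8 boolean column; the GDF_REQUIRE checks insist that the output has the same size as the input and dtype GDF_INT8. (Note, incidentally, that gdf_less_than_op and gdf_less_than_or_equals_op are defined with x > y / x >= y in both copies of this file, so GDF_LESS_THAN currently evaluates as a greater-than.) A rough caller-side sketch, assuming the gdf_column members used in this file (data, valid, size, dtype, null_count) and pre-existing device allocations, e.g. from RMM:

#include "cudf.h"  // assumed to declare gdf_column and the gpu_comparison_static_* entry points

// Mark rows of an int32 column that are greater than a threshold.
// d_values / d_valid / d_out_data / d_out_valid are assumed to be device
// allocations of the right size; this sketch only shows how the columns
// are wired into the comparison call.
gdf_error flag_greater_than(void *d_values, gdf_valid_type *d_valid,
                            void *d_out_data, gdf_valid_type *d_out_valid,
                            gdf_size_type num_rows, int32_t threshold) {
  gdf_column input{};
  input.data = d_values;
  input.valid = d_valid;
  input.size = num_rows;
  input.dtype = GDF_INT32;
  input.null_count = 0;

  gdf_column output{};
  output.data = d_out_data;    // one int8_t per row
  output.valid = d_out_valid;  // bitmask, written by the call
  output.size = num_rows;
  output.dtype = GDF_INT8;     // required by the GDF_REQUIRE checks

  return gpu_comparison_static_i32(&input, threshold, &output, GDF_GREATER_THAN);
}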
d0c2857e4e99f988d0268ce5bbe04c10b1c786f4.cu
#include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.h" #include "cudf/functions.h" #include "bitmask/bitmask_ops.h" #include "rmm/thrust_rmm_allocator.h" #include <cuda_runtime.h> #include <vector> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/iterator/iterator_adaptor.h> template <typename LeftType,typename RightType,typename ResultType > struct gdf_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x == y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_not_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x != y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_greater_than_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x > y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_greater_than_or_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x >= y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_less_than_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x > y; } }; template <typename LeftType,typename RightType,typename ResultType > struct gdf_less_than_or_equals_op : public thrust::binary_function< LeftType, RightType, ResultType> { __host__ __device__ ResultType operator()(LeftType x, RightType y) { return x >= y; } }; /** * @brief takes two columns data and their valid bitmasks and performs a comparison operation returning a column of type bool * * Takes two thrust::iterator_adaptor implemented iterators and performs a filter operation on them that it outputs into a third thust::iterator_adaptor dervied iterator. 
* We are not making assumptions about what kind of data is being passed into it for these pointers so * * @param begin_left an iterator that implements thrust::iterator_adaptor * @param begin_right an iterator that implements thrust::iterator_adaptor * @param result an iterator that implements thrust::iterator_adaptor * @param operation an enum telling us what kind of comparision operation we are trying to do * @param num_values the number of rows in our columns * @param valid_left left column null bitmask (1 = not null) * @param valid_right right column null bitmask * @param valid_out output column null bitmask * @param left_null_count tells us if there are any nulls in the left column * @param right_null_count tells us if there are any nulls in the right column * @param */ template<typename IteratorTypeLeft, typename IteratorTypeRight, typename IteratorTypeResult, class LeftType = typename IteratorTypeLeft::value_type, class RightType = typename IteratorTypeRight::value_type, class ResultType = typename IteratorTypeResult::value_type> void gpu_filter_op(IteratorTypeLeft begin_left, IteratorTypeRight begin_right, IteratorTypeResult result, gdf_comparison_operator operation, gdf_size_type num_values, gdf_valid_type * valid_left, gdf_valid_type * valid_right, gdf_valid_type * valid_out, gdf_size_type left_null_count, gdf_size_type right_null_count, gdf_size_type & out_null_count, cudaStream_t stream){ //TODO: be able to pass in custom comparison operators so we can handle types that have not implemented these oeprators IteratorTypeLeft end_left = begin_left + num_values; //regardless of nulls we perform the same operation //the nulls we are going to and together if (operation == GDF_EQUALS) { gdf_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_NOT_EQUALS) { gdf_not_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_GREATER_THAN_OR_EQUALS) { gdf_greater_than_or_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_GREATER_THAN) { gdf_greater_than_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_LESS_THAN) { gdf_less_than_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream), begin_left, end_left, begin_right, result, op); } else if (operation == GDF_LESS_THAN_OR_EQUALS) { gdf_less_than_or_equals_op<LeftType, RightType, ResultType> op; thrust::transform(rmm::exec_policy(stream)->on(stream),begin_left, end_left, begin_right, result, op); } //TODO: if we could make sure that these things aligned on 8 byte boundaries we could probable do this more efficiently as an unsigned long long if((left_null_count == 0) && (right_null_count == 0) ){ gdf_error error = all_bitmask_on(valid_out, out_null_count,num_values,stream); }else if(valid_right == valid_left){ //this is often the case if we are passing in the same column to operate on itself //or when we are sending something like a constant_iterator for the right hand side, allows us some shortcuts gdf_size_type num_chars_bitmask = ( ( num_values +( GDF_VALID_BITSIZE - 1)) / GDF_VALID_BITSIZE ); cudaError_t error = 
cudaMemcpyAsync(valid_out,valid_left,num_chars_bitmask * sizeof(gdf_valid_type),cudaMemcpyDeviceToDevice,stream); out_null_count = left_null_count; }else{ apply_bitmask_to_bitmask( out_null_count, valid_out, valid_left, valid_right, stream, num_values); } cudaStreamSynchronize(stream); } // stencil: plantilla! // template<typename T> gdf_error gpu_comparison_static_templated(gdf_column *lhs, T value, gdf_column *output,gdf_comparison_operator operation){ GDF_REQUIRE(lhs->size == output->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(output->dtype == GDF_INT8, GDF_COLUMN_SIZE_MISMATCH); cudaStream_t stream; cudaStreamCreate(&stream); if(lhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> left_ptr((int8_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> left_ptr((int16_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> left_ptr((int32_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> left_ptr((int64_t *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> left_ptr((float *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); }else if(lhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> left_ptr((double *) lhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::constant_iterator<T>(value), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,lhs->valid,output->valid, lhs->null_count,lhs->null_count,output->null_count,stream ); } cudaStreamSynchronize(stream); cudaStreamDestroy(stream); return GDF_SUCCESS; } gdf_error gpu_comparison_static_i8(gdf_column *lhs, int8_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i16(gdf_column *lhs, int16_t value, gdf_column *output,gdf_comparison_operator operation){ return 
gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i32(gdf_column *lhs, int32_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_i64(gdf_column *lhs, int64_t value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_f32(gdf_column *lhs, float value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison_static_f64(gdf_column *lhs, double value, gdf_column *output,gdf_comparison_operator operation){ return gpu_comparison_static_templated(lhs, value, output,operation); } gdf_error gpu_comparison(gdf_column *lhs, gdf_column *rhs, gdf_column *output,gdf_comparison_operator operation){ GDF_REQUIRE(lhs->size == rhs->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(lhs->size == output->size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(output->dtype == GDF_INT8, GDF_COLUMN_SIZE_MISMATCH); //TODO: consider adding more requirements like that the columns be well defined in their type //I commented this out because I am not sure if we want to require the output be an int8 //GDF_REQUIRE(output->dtype == GDF_INT8,GDF_UNSUPPORTED_DTYPE); // SO... I know the follow code looks, questionable, but the other option is to have a shitload of function definitions // given that our gdf_columns very conveniently carry around their types with them, this seems to be to be simpler // than having tons of function definitions. it also makes it so much nicer to just type gpu_filter(lhs,rhs,output); // also we are making it so that we can send any types here, the only one which is debatable I feel is output which // we could decide to always have be an int8 since the output is a boolean cudaStream_t stream; cudaStreamCreate(&stream); if(lhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> left_ptr((int8_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); //... 
}else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> left_ptr((int16_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); 
}else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> left_ptr((int32_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else 
if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> left_ptr((int64_t *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> left_ptr((float *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), 
thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } }else if(lhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> left_ptr((double *) lhs->data); if(rhs->dtype == GDF_INT8){ thrust::device_ptr<int8_t> right_ptr((int8_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT16){ thrust::device_ptr<int16_t> right_ptr((int16_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT32){ thrust::device_ptr<int32_t> right_ptr((int32_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), 
thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_INT64){ thrust::device_ptr<int64_t> right_ptr((int64_t *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT32){ thrust::device_ptr<float> right_ptr((float *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); }else if(rhs->dtype == GDF_FLOAT64){ thrust::device_ptr<double> right_ptr((double *) rhs->data); thrust::device_ptr<int8_t> out_ptr((int8_t *) output->data); gpu_filter_op( thrust::detail::make_normal_iterator(left_ptr),thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(out_ptr),operation,lhs->size,lhs->valid,rhs->valid,output->valid, lhs->null_count,rhs->null_count,output->null_count,stream ); } } cudaStreamSynchronize(stream); cudaStreamDestroy(stream); return GDF_SUCCESS; }
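Note on the valid-bitmask handling above: when both inputs can contain nulls, the output bitmask is the element-wise AND of the two input bitmasks, which is what apply_bitmask_to_bitmask computes together with the new null count. As an illustration only (not the actual cudf/libgdf implementation, and assuming gdf_valid_type is an 8-bit word so that GDF_VALID_BITSIZE is 8 and the gdf typedefs are in scope), the core of such an operation could be a kernel like this:

// Illustrative sketch: AND two validity bitmasks word by word.
__global__ void and_bitmasks_kernel(gdf_valid_type* valid_out,
                                    const gdf_valid_type* valid_left,
                                    const gdf_valid_type* valid_right,
                                    gdf_size_type num_bitmask_words)
{
    gdf_size_type i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_bitmask_words) {
        valid_out[i] = valid_left[i] & valid_right[i];
    }
}

The null count of the result would then be num_values minus the number of set bits in valid_out (for example via a __popc-based reduction), and the TODO in gpu_filter_op points out that processing the mask as 64-bit words instead of single bytes would cut the number of memory transactions.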
294bbce883b91f0970ad5f16c83f83479b7ff5ec.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/retag.h> #include <thrust/sequence.h> #include <thrust/fill.h> template <class Vector> void TestScatterSimple(void) { typedef typename Vector::value_type T; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterSimple); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_system &system, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator output) { system.validate_dispatch(); } void TestScatterDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_tag, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator output) { *output = 13; } void TestScatterDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterDispatchImplicit); template <typename T> void TestScatter(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatter); template <typename T> void TestScatterToDiscardIterator(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), thrust::make_discard_iterator()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), thrust::make_discard_iterator()); // there's nothing to check -- just make sure it compiles } DECLARE_VARIABLE_UNITTEST(TestScatterToDiscardIterator); template <class Vector> void TestScatterIfSimple(void) 
{ typedef typename Vector::value_type T; Vector flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterIfSimple); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_system &system, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator output) { system.validate_dispatch(); } void TestScatterIfDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter_if(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterIfDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_tag, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator output) { *output = 13; } void TestScatterIfDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter_if(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterIfDispatchImplicit); template <typename T> class is_even_scatter_if { public: __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template <typename T> void TestScatterIf(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatterIf); template <typename T> void TestScatterIfToDiscardIterator(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), 
d_map.begin(), d_map.begin(), thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); } DECLARE_VARIABLE_UNITTEST(TestScatterIfToDiscardIterator); template <typename Vector> void TestScatterCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(source.begin(), source.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterCountingIterator); template <typename Vector> void TestScatterIfCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector stencil(10, 1); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(source.begin(), source.end(), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterIfCountingIterator);
294bbce883b91f0970ad5f16c83f83479b7ff5ec.cu
#include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/retag.h> #include <thrust/sequence.h> #include <thrust/fill.h> template <class Vector> void TestScatterSimple(void) { typedef typename Vector::value_type T; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterSimple); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_system &system, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator output) { system.validate_dispatch(); } void TestScatterDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_tag, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator output) { *output = 13; } void TestScatterDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterDispatchImplicit); template <typename T> void TestScatter(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatter); template <typename T> void TestScatterToDiscardIterator(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), thrust::make_discard_iterator()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), thrust::make_discard_iterator()); // there's nothing to check -- just make sure it compiles } DECLARE_VARIABLE_UNITTEST(TestScatterToDiscardIterator); template <class Vector> void TestScatterIfSimple(void) { typedef typename Vector::value_type T; Vector 
flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_VECTOR_UNITTEST(TestScatterIfSimple); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_system &system, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator output) { system.validate_dispatch(); } void TestScatterIfDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter_if(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterIfDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_tag, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator output) { *output = 13; } void TestScatterIfDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter_if(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterIfDispatchImplicit); template <typename T> class is_even_scatter_if { public: __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template <typename T> void TestScatterIf(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatterIf); template <typename T> void TestScatterIfToDiscardIterator(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), 
thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); } DECLARE_VARIABLE_UNITTEST(TestScatterIfToDiscardIterator); template <typename Vector> void TestScatterCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(source.begin(), source.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterCountingIterator); template <typename Vector> void TestScatterIfCountingIterator(void) { typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector stencil(10, 1); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(source.begin(), source.end(), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestScatterIfCountingIterator);
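For readers unfamiliar with the primitive being tested: thrust::scatter performs dst[map[i]] = src[i], and thrust::scatter_if does the same only where the stencil passes the predicate. A standalone usage sketch, separate from the unit-test framework above, reproduces the TestScatterSimple expectations:

#include <thrust/device_vector.h>
#include <thrust/scatter.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <cstdio>

int main()
{
    thrust::device_vector<int> src(5), map(5), dst(8, 0);
    thrust::sequence(src.begin(), src.end());             // src = 0 1 2 3 4
    const int h_map[5] = {6, 3, 1, 7, 2};
    thrust::copy(h_map, h_map + 5, map.begin());
    // dst[map[i]] = src[i]  ->  dst = 0 2 4 1 0 0 0 3
    thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin());
    std::printf("dst[1] = %d, dst[7] = %d\n", (int)dst[1], (int)dst[7]);
    return 0;
}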
fb54263150294a9c25868e02369e44d1ba858fba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "definitions.h" #include "kernel.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev, int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float stepLimit[NP] = {0.03f, 0.03f, 0.04f, 400, 2}; // x,y,z step limits are in micron float x0_next[NPL]; float dL_pos = 0, dL2_pos = 0; float dL_I, dL2_I; // photon and background float step[NPL]; float rate = 1/(1 + lambda); float tmp; int s, p, k; // x,y,z for (p = 0; p < 3; p++) { for (s = 0; s < 4; s++) { dL_pos += FirstDev[s*NP*Nfit + j*NP + p]; dL2_pos += SecondDev[s*NP*Nfit + j*NP + p]; } tmp = -1 * dL_pos / dL2_pos * rate; step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]); } for (s = 0; s < 4; s++) { // photon dL_I = FirstDev[s*NP*Nfit + j*NP + 3]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 3]; tmp = -1 * dL_I / dL2_I * rate; step[3 + s] = fminf(fmaxf(tmp, -stepLimit[3]), stepLimit[3]); // background dL_I = FirstDev[s*NP*Nfit + j*NP + 4]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 4]; tmp = -1 * dL_I / dL2_I * rate; step[7 + s] = fminf(fmaxf(tmp, -stepLimit[4]), stepLimit[4]); } x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int); x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int); for (k = 2; k < NPL; k++) { x0_next[k] = ParamIn[NPL*j + k] + step[k]; } for (s = 0; s < 4; s++) { x0_next[3 + s] = (x0_next[3 + s] <= 100 ? 100 : x0_next[3 + s]); // intensity is not less than 100 x0_next[7 + s] = (x0_next[7 + s] <= 0 ? 
0.01f : x0_next[7 + s]);// bg is not less than 0 } x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4); x0_next[2] = fminf(fmaxf(x0_next[2], -1.4), 1.4);//z position is within -1.4 to 1.4 um for (k = 0; k < NPL; k++) { ParamNext[NPL*j + k] = x0_next[k]; Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k]; } } __global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize, float *FirstDev, float *SecondDev) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float dL[NP], dL2[NP]; float psfI; int k, i; for (k = 0; k < NP; k++) { dL[k] = 0; dL2[k] = 0; } fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize); for (int i = 0; i < PSFsize; i++) { psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i]; dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1; dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI; } for (int k = 0; k < NP; k++) { FirstDev[NP * j + k] = dL[k]; SecondDev[NP * j + k] = dL2[k]; } } __device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize) { float psfI; for (int i = 0; i < PSFsize; i++) { psfI = psf[i] * I + bg + gainR[i]; dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id; dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI; } }
fb54263150294a9c25868e02369e44d1ba858fba.cu
#include "cuda_runtime.h" #include "definitions.h" #include "kernel.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev, int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float stepLimit[NP] = {0.03f, 0.03f, 0.04f, 400, 2}; // x,y,z step limits are in micron float x0_next[NPL]; float dL_pos = 0, dL2_pos = 0; float dL_I, dL2_I; // photon and background float step[NPL]; float rate = 1/(1 + lambda); float tmp; int s, p, k; // x,y,z for (p = 0; p < 3; p++) { for (s = 0; s < 4; s++) { dL_pos += FirstDev[s*NP*Nfit + j*NP + p]; dL2_pos += SecondDev[s*NP*Nfit + j*NP + p]; } tmp = -1 * dL_pos / dL2_pos * rate; step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]); } for (s = 0; s < 4; s++) { // photon dL_I = FirstDev[s*NP*Nfit + j*NP + 3]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 3]; tmp = -1 * dL_I / dL2_I * rate; step[3 + s] = fminf(fmaxf(tmp, -stepLimit[3]), stepLimit[3]); // background dL_I = FirstDev[s*NP*Nfit + j*NP + 4]; dL2_I = SecondDev[s*NP*Nfit + j*NP + 4]; tmp = -1 * dL_I / dL2_I * rate; step[7 + s] = fminf(fmaxf(tmp, -stepLimit[4]), stepLimit[4]); } x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int); x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int); for (k = 2; k < NPL; k++) { x0_next[k] = ParamIn[NPL*j + k] + step[k]; } for (s = 0; s < 4; s++) { x0_next[3 + s] = (x0_next[3 + s] <= 100 ? 100 : x0_next[3 + s]); // intensity is not less than 100 x0_next[7 + s] = (x0_next[7 + s] <= 0 ? 
0.01f : x0_next[7 + s]);// bg is not less than 0 } x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4); x0_next[2] = fminf(fmaxf(x0_next[2], -1.4), 1.4);//z position is within -1.4 to 1.4 um for (k = 0; k < NPL; k++) { ParamNext[NPL*j + k] = x0_next[k]; Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k]; } } __global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize, float *FirstDev, float *SecondDev) { const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; //Prevent read/write past end of array int j = BlockSize*bx + tx; if ((bx*BlockSize + tx) >= Nfit) return; float dL[NP], dL2[NP]; float psfI; int k, i; for (k = 0; k < NP; k++) { dL[k] = 0; dL2[k] = 0; } fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize); fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize); for (int i = 0; i < PSFsize; i++) { psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i]; dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1; dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI; } for (int k = 0; k < NP; k++) { FirstDev[NP * j + k] = dL[k]; SecondDev[NP * j + k] = dL2[k]; } } __device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize) { float psfI; for (int i = 0; i < PSFsize; i++) { psfI = psf[i] * I + bg + gainR[i]; dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id; dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI; } }
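The update rule in kernel_Localization is a damped Newton (Levenberg-style) step: for each parameter the first and second likelihood derivatives are summed over the four channels, the raw step -dL/d2L is scaled by 1/(1 + lambda), and the result is clamped to a per-parameter limit before being applied. A minimal sketch of that rule in isolation (the helper name is illustrative, not part of the original code):

// Damped, clamped Newton step, as applied per parameter in kernel_Localization.
__host__ __device__ inline float damped_newton_step(float dL, float d2L,
                                                     float lambda,
                                                     float step_limit)
{
    const float rate = 1.0f / (1.0f + lambda);           // damping factor
    const float step = -dL / d2L * rate;                 // scaled Newton step
    return fminf(fmaxf(step, -step_limit), step_limit);  // respect the limit
}
// Example: dL = 2, d2L = -100, lambda = 1 gives a step of 0.01,
// which is inside the 0.03 micron limit used for x and y.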
41b65b0eda7b0e423ed02f9bd2824ab7464294c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ic_kernels.hpp" #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ic factorization namespace. 
* * @ingroup factor */ namespace par_ic_factorization { constexpr int default_block_size = 512; #include "common/cuda_hip/factorization/par_ic_kernels.hpp.inc" template <typename ValueType, typename IndexType> void init_factor(std::shared_ptr<const DefaultExecutor> exec, matrix::Csr<ValueType, IndexType>* l) { auto num_rows = l->get_size()[0]; auto num_blocks = ceildiv(num_rows, default_block_size); auto l_row_ptrs = l->get_const_row_ptrs(); auto l_vals = l->get_values(); if (num_rows > 0) { hipLaunchKernelGGL(( kernel::ic_init), dim3(num_blocks), dim3(default_block_size), 0, 0, l_row_ptrs, as_cuda_type(l_vals), num_rows); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_IC_INIT_FACTOR_KERNEL); template <typename ValueType, typename IndexType> void compute_factor(std::shared_ptr<const DefaultExecutor> exec, size_type iterations, const matrix::Coo<ValueType, IndexType>* a_lower, matrix::Csr<ValueType, IndexType>* l) { auto nnz = l->get_num_stored_elements(); auto num_blocks = ceildiv(nnz, default_block_size); for (size_type i = 0; i < iterations; ++i) { if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::ic_sweep), dim3(num_blocks), dim3(default_block_size), 0, 0, a_lower->get_const_row_idxs(), a_lower->get_const_col_idxs(), as_cuda_type(a_lower->get_const_values()), l->get_const_row_ptrs(), l->get_const_col_idxs(), as_cuda_type(l->get_values()), static_cast<IndexType>(l->get_num_stored_elements())); } } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_IC_COMPUTE_FACTOR_KERNEL); } // namespace par_ic_factorization } // namespace cuda } // namespace kernels } // namespace gko
41b65b0eda7b0e423ed02f9bd2824ab7464294c9.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ic_kernels.hpp" #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ic factorization namespace. 
* * @ingroup factor */ namespace par_ic_factorization { constexpr int default_block_size = 512; #include "common/cuda_hip/factorization/par_ic_kernels.hpp.inc" template <typename ValueType, typename IndexType> void init_factor(std::shared_ptr<const DefaultExecutor> exec, matrix::Csr<ValueType, IndexType>* l) { auto num_rows = l->get_size()[0]; auto num_blocks = ceildiv(num_rows, default_block_size); auto l_row_ptrs = l->get_const_row_ptrs(); auto l_vals = l->get_values(); if (num_rows > 0) { kernel::ic_init<<<num_blocks, default_block_size>>>( l_row_ptrs, as_cuda_type(l_vals), num_rows); } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_IC_INIT_FACTOR_KERNEL); template <typename ValueType, typename IndexType> void compute_factor(std::shared_ptr<const DefaultExecutor> exec, size_type iterations, const matrix::Coo<ValueType, IndexType>* a_lower, matrix::Csr<ValueType, IndexType>* l) { auto nnz = l->get_num_stored_elements(); auto num_blocks = ceildiv(nnz, default_block_size); for (size_type i = 0; i < iterations; ++i) { if (num_blocks > 0) { kernel::ic_sweep<<<num_blocks, default_block_size>>>( a_lower->get_const_row_idxs(), a_lower->get_const_col_idxs(), as_cuda_type(a_lower->get_const_values()), l->get_const_row_ptrs(), l->get_const_col_idxs(), as_cuda_type(l->get_values()), static_cast<IndexType>(l->get_num_stored_elements())); } } } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_IC_COMPUTE_FACTOR_KERNEL); } // namespace par_ic_factorization } // namespace cuda } // namespace kernels } // namespace gko
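This pair differs from its CUDA original only in launch syntax: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL, whose extra arguments are the dynamic shared-memory size and the stream (both 0 here). The generic pattern, shown with a hypothetical kernel rather than the Ginkgo ones:

__global__ void scale(float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n)
{
    const int block_size = 512;
    const int num_blocks = (n + block_size - 1) / block_size;  // same ceildiv as above
    if (num_blocks > 0) {
        // CUDA form, as in the .cu file:
        scale<<<num_blocks, block_size>>>(d_x, a, n);
        // HIP form emitted by hipify, as in the .hip file:
        //   hipLaunchKernelGGL(scale, dim3(num_blocks), dim3(block_size),
        //                      0 /*sharedMemBytes*/, 0 /*stream*/, d_x, a, n);
    }
}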
5a823f33d60cd062b9c8648da252bc596fc13ce9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void square_i32 (int* vector, int* output, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < len) { output[idx] = vector[idx] * vector[idx]; } }
5a823f33d60cd062b9c8648da252bc596fc13ce9.cu
#include "includes.h" __global__ void square_i32 (int* vector, int* output, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < len) { output[idx] = vector[idx] * vector[idx]; } }
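A hypothetical host-side driver for square_i32 (not part of the original file, assumed to be compiled together with the kernel above) would allocate device buffers, copy the input over, cover len with enough blocks, and read the result back:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void square_i32(int* vector, int* output, int len);  // defined above

int main()
{
    const int len = 1024;
    int h_in[len], h_out[len];
    for (int i = 0; i < len; ++i) h_in[i] = i;

    int *d_in, *d_out;
    cudaMalloc(&d_in, len * sizeof(int));
    cudaMalloc(&d_out, len * sizeof(int));
    cudaMemcpy(d_in, h_in, len * sizeof(int), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (len + block - 1) / block;   // enough blocks to cover len
    square_i32<<<grid, block>>>(d_in, d_out, len);

    cudaMemcpy(h_out, d_out, len * sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("%d squared is %d\n", 7, h_out[7]);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}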
5781f66f9f002573d79202ccc7ea2f18b00fdf4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudarray/common.hpp" #include "cudarray/nnet/one_hot.hpp" namespace cudarray { template <typename T> __global__ void kernel_one_hot_encode(const int *labels, int n_classes, int n, T *out) { CUDA_GRID_STRIDE_LOOP(idx, n*n_classes) { int class_idx = idx % n_classes; int label_idx = idx / n_classes; out[idx] = labels[label_idx] == class_idx ? 1.0 : 0.0; } } template <typename T> void one_hot_encode(const int *labels, int n_classes, int n, T *out) { hipLaunchKernelGGL(( kernel_one_hot_encode), dim3(cuda_blocks(n*n_classes)), dim3(kNumBlockThreads), 0, 0, labels, n_classes, n, out); CUDA_KERNEL_CHECK; } template void one_hot_encode(const int *labels, int n_classes, int n, float *out); // copy rows template <typename T> __global__ void kernel_copy_rows(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; int from_row = rowids[row]; to_mat[idx] = from_mat[from_row * rowsize + column]; } } template <typename T> __global__ void kernel_copy_rows_mapto(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; int from_row = rowids[row]; to_mat[from_row * rowsize + column] = from_mat[idx]; } } template <typename T> void copy_rows(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat, int mapfrom) { if (mapfrom == 1) { hipLaunchKernelGGL(( kernel_copy_rows), dim3(cuda_blocks(numrows*rowsize)), dim3(kNumBlockThreads), 0, 0, rowids, numrows, rowsize, from_mat, to_mat); CUDA_KERNEL_CHECK; } else { hipLaunchKernelGGL(( kernel_copy_rows_mapto), dim3(cuda_blocks(numrows*rowsize)), dim3(kNumBlockThreads), 0, 0, rowids, numrows, rowsize, from_mat, to_mat); CUDA_KERNEL_CHECK; } } template void copy_rows(const int *rowids, int numrows, int rowsize, float *from_mat, float *to_mat, int mapfrom); // copy sum rows template <typename T> __global__ void kernel_copy_sum_rows(const int *rowids, T *coefficients, int numsums, int numrows, int rowsize, T *from_mat, T *to_mat, float constant, float var) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; to_mat[idx]=0; int from_row=0; if (coefficients != NULL ){ for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; to_mat[idx] += coefficients[row * numsums + j] * from_mat[from_row * rowsize + column]; } } else { for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; to_mat[idx] += constant * pow(var, (float)j) * from_mat[from_row * rowsize + column]; } } } } template <typename T> __global__ void kernel_copy_sum_rows_mapto(const int *rowids, T *coefficients, int numsums, int numrows, int rowsize, T *from_mat, T *to_mat, float constant, float var) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; //to_mat[idx]=0; int from_row=0; if (coefficients != NULL ){ for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; // we have to use an atomic add here because different threads are // writnig to the same memory location atomicAdd(&to_mat[from_row * rowsize + column], coefficients[row * numsums + j] * from_mat[idx]); } } else { for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; // we have to use an atomic add here because different threads are // writnig to the same memory 
location atomicAdd(&to_mat[from_row * rowsize + column], constant * pow(var,(float)j) *from_mat[idx]); } } } } template <typename T> void copy_sum_rows(const int *rowids, int numsums, int numrows, int rowsize, T *from_mat, T *to_mat, int mapfrom, T *coefficients, float constant, float var) { if (mapfrom == 1) { hipLaunchKernelGGL(( kernel_copy_sum_rows), dim3(cuda_blocks(numrows*rowsize)), dim3(kNumBlockThreads), 0, 0, rowids, coefficients, numsums, numrows, rowsize, from_mat, to_mat, constant, var); CUDA_KERNEL_CHECK; } else { hipLaunchKernelGGL(( kernel_copy_sum_rows_mapto), dim3(cuda_blocks(numrows*rowsize)), dim3(kNumBlockThreads), 0, 0, rowids, coefficients, numsums, numrows, rowsize, from_mat, to_mat, constant, var); CUDA_KERNEL_CHECK; } } template void copy_sum_rows(const int *rowids, int numsums, int numrows, int rowsize, float *from_mat, float *to_mat, int mapfrom, float *coefficients, float constant, float var); }
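Two implementation details in this file are worth spelling out: kernel_one_hot_encode flattens an n x n_classes row-major output, so thread idx writes row idx / n_classes and column idx % n_classes; and kernel_copy_sum_rows_mapto needs atomicAdd because several source rows can map onto the same destination row. A CPU reference for the one-hot kernel, useful when testing it (illustrative, not part of the library):

// CPU reference for kernel_one_hot_encode: out is n x n_classes, row-major.
void one_hot_encode_reference(const int* labels, int n_classes, int n, float* out)
{
    for (int idx = 0; idx < n * n_classes; ++idx) {
        const int class_idx = idx % n_classes;  // column within the row
        const int label_idx = idx / n_classes;  // which sample / row
        out[idx] = (labels[label_idx] == class_idx) ? 1.0f : 0.0f;
    }
}
// Example: n_classes = 3, labels = {2, 0}  ->  rows {0,0,1} and {1,0,0}.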
5781f66f9f002573d79202ccc7ea2f18b00fdf4f.cu
#include "cudarray/common.hpp" #include "cudarray/nnet/one_hot.hpp" namespace cudarray { template <typename T> __global__ void kernel_one_hot_encode(const int *labels, int n_classes, int n, T *out) { CUDA_GRID_STRIDE_LOOP(idx, n*n_classes) { int class_idx = idx % n_classes; int label_idx = idx / n_classes; out[idx] = labels[label_idx] == class_idx ? 1.0 : 0.0; } } template <typename T> void one_hot_encode(const int *labels, int n_classes, int n, T *out) { kernel_one_hot_encode<<<cuda_blocks(n*n_classes), kNumBlockThreads>>>( labels, n_classes, n, out); CUDA_KERNEL_CHECK; } template void one_hot_encode(const int *labels, int n_classes, int n, float *out); // copy rows template <typename T> __global__ void kernel_copy_rows(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; int from_row = rowids[row]; to_mat[idx] = from_mat[from_row * rowsize + column]; } } template <typename T> __global__ void kernel_copy_rows_mapto(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; int from_row = rowids[row]; to_mat[from_row * rowsize + column] = from_mat[idx]; } } template <typename T> void copy_rows(const int *rowids, int numrows, int rowsize, T *from_mat, T *to_mat, int mapfrom) { if (mapfrom == 1) { kernel_copy_rows<<<cuda_blocks(numrows*rowsize), kNumBlockThreads>>>( rowids, numrows, rowsize, from_mat, to_mat); CUDA_KERNEL_CHECK; } else { kernel_copy_rows_mapto<<<cuda_blocks(numrows*rowsize), kNumBlockThreads>>>( rowids, numrows, rowsize, from_mat, to_mat); CUDA_KERNEL_CHECK; } } template void copy_rows(const int *rowids, int numrows, int rowsize, float *from_mat, float *to_mat, int mapfrom); // copy sum rows template <typename T> __global__ void kernel_copy_sum_rows(const int *rowids, T *coefficients, int numsums, int numrows, int rowsize, T *from_mat, T *to_mat, float constant, float var) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; to_mat[idx]=0; int from_row=0; if (coefficients != NULL ){ for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; to_mat[idx] += coefficients[row * numsums + j] * from_mat[from_row * rowsize + column]; } } else { for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; to_mat[idx] += constant * pow(var, (float)j) * from_mat[from_row * rowsize + column]; } } } } template <typename T> __global__ void kernel_copy_sum_rows_mapto(const int *rowids, T *coefficients, int numsums, int numrows, int rowsize, T *from_mat, T *to_mat, float constant, float var) { CUDA_GRID_STRIDE_LOOP(idx, numrows*rowsize) { int row = idx / rowsize; int column = idx % rowsize; //to_mat[idx]=0; int from_row=0; if (coefficients != NULL ){ for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; // we have to use an atomic add here because different threads are // writnig to the same memory location atomicAdd(&to_mat[from_row * rowsize + column], coefficients[row * numsums + j] * from_mat[idx]); } } else { for (int j=0; j < numsums; j++) { from_row = rowids[row * numsums + j]; // we have to use an atomic add here because different threads are // writnig to the same memory location atomicAdd(&to_mat[from_row * rowsize + column], constant * pow(var,(float)j) *from_mat[idx]); } } } } template <typename T> void copy_sum_rows(const int *rowids, int numsums, int numrows, int 
rowsize, T *from_mat, T *to_mat, int mapfrom, T *coefficients, float constant, float var) { if (mapfrom == 1) { kernel_copy_sum_rows<<<cuda_blocks(numrows*rowsize), kNumBlockThreads>>>( rowids, coefficients, numsums, numrows, rowsize, from_mat, to_mat, constant, var); CUDA_KERNEL_CHECK; } else { kernel_copy_sum_rows_mapto<<<cuda_blocks(numrows*rowsize), kNumBlockThreads>>>( rowids, coefficients, numsums, numrows, rowsize, from_mat, to_mat, constant, var); CUDA_KERNEL_CHECK; } } template void copy_sum_rows(const int *rowids, int numsums, int numrows, int rowsize, float *from_mat, float *to_mat, int mapfrom, float *coefficients, float constant, float var); }
e18015c137ed8b9a2374eefe3b24432d7f89c892.hip
// !!! This is a file automatically generated by hipify!!! /***************************************************************** This is the CUDA version of backpropagation algorithm using GPGPU to accelerate the performance. *****************************************************************/ #include <stdio.h> #include <stdint.h> #include "support.h" #include "kernel.hip" int main(int argc, char* argv[]) { Timer timer; // Initialize host variables ---------------------------------------------- int layer_size, seed; BPNN *net_h; hipError_t cuda_ret; // device variables float *input_units_d, *hidden_units_d, *output_units_d, *hidden_delta_d, *output_delta_d, *target_d, *input_weights_d, *hidden_weights_d, *input_prev_weights_d, *hidden_prev_weights_d; // __device__ int input_n_d, hidden_n_d, output_n_d; if(argc!=2){ fprintf(stderr, "usage: backprop <num of input elements>\n"); exit(0); } layer_size = atoi(argv[1]); seed = 7; bpnn_initialize(seed); printf("Setting up the problem..."); fflush(stdout); startTime(&timer); net_h = bpnn_create(layer_size, 16, 1); // (16, 1 can not be changed) load(net_h, layer_size); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Input layer size : %d\n", layer_size); // Allocate device variables ---------------------------------------------- printf("Creating device neural network..."); fflush(stdout); startTime(&timer); int in = net_h->input_n; int hid = net_h->hidden_n; int out = net_h->output_n; cuda_ret = hipMalloc((void**)&(input_units_d), (in+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&hidden_units_d, (hid+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&output_units_d, (out+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&hidden_delta_d, (hid+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&output_delta_d, (out+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&target_d, (out+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&input_weights_d, (in+1)*(hid+1)*sizeof(float*)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&hidden_weights_d, (hid+1)*(out+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&input_prev_weights_d, (in+1)*(hid+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&hidden_prev_weights_d, (hid+1)*(out+1)*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying neural network from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(input_units_d, net_h->input_units, (in+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(hidden_units_d, net_h->hidden_units, (hid+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(output_units_d, 
net_h->output_units, (out+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(hidden_delta_d, net_h->hidden_delta, (hid+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(output_delta_d, net_h->output_delta, (out+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(target_d, net_h->target, (out+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(input_weights_d, net_h->input_weights, (in+1)*(hid+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(hidden_weights_d, net_h->hidden_weights, (hid+1)*(out+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(input_prev_weights_d, net_h->input_prev_weights, (in+1)*(hid+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemcpy(hidden_prev_weights_d, net_h->hidden_prev_weights, (hid+1)*(out+1)*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- //entering the training kernel, only one iteration printf("Starting training kernel\n"); startTime(&timer); bpnn_train_kernel_device(in, hid, out, input_units_d, hidden_units_d, output_units_d, hidden_delta_d, output_delta_d, target_d, input_weights_d, hidden_weights_d, input_prev_weights_d, hidden_prev_weights_d); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(net_h->hidden_weights, hidden_weights_d, (hid+1)*(out+1)*sizeof(float), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the host"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Save results to file out_gpu.txt ---------------------------------------- const char *file_gpu = "out_gpu.txt"; bpnn_save_dbg(net_h, file_gpu); // Verify correctness ----------------------------------------------------- // const chat *file_cpu = "../out.txt"; // compare_result(file_gpu, file_cpu); // Free memory ------------------------------------------------------------ bpnn_free(net_h); hipFree(input_units_d); hipFree(hidden_units_d); hipFree(output_units_d); hipFree(hidden_delta_d); hipFree(output_delta_d); hipFree(target_d); hipFree(input_weights_d); hipFree(hidden_weights_d); hipFree(input_prev_weights_d); hipFree(hidden_prev_weights_d); printf("Training done!\n\n"); return 0; }
e18015c137ed8b9a2374eefe3b24432d7f89c892.cu
/***************************************************************** This is the CUDA version of backpropagation algorithm using GPGPU to accelerate the performance. *****************************************************************/ #include <stdio.h> #include <stdint.h> #include "support.h" #include "kernel.cu" int main(int argc, char* argv[]) { Timer timer; // Initialize host variables ---------------------------------------------- int layer_size, seed; BPNN *net_h; cudaError_t cuda_ret; // device variables float *input_units_d, *hidden_units_d, *output_units_d, *hidden_delta_d, *output_delta_d, *target_d, *input_weights_d, *hidden_weights_d, *input_prev_weights_d, *hidden_prev_weights_d; // __device__ int input_n_d, hidden_n_d, output_n_d; if(argc!=2){ fprintf(stderr, "usage: backprop <num of input elements>\n"); exit(0); } layer_size = atoi(argv[1]); seed = 7; bpnn_initialize(seed); printf("Setting up the problem..."); fflush(stdout); startTime(&timer); net_h = bpnn_create(layer_size, 16, 1); // (16, 1 can not be changed) load(net_h, layer_size); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Input layer size : %d\n", layer_size); // Allocate device variables ---------------------------------------------- printf("Creating device neural network..."); fflush(stdout); startTime(&timer); int in = net_h->input_n; int hid = net_h->hidden_n; int out = net_h->output_n; cuda_ret = cudaMalloc((void**)&(input_units_d), (in+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&hidden_units_d, (hid+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&output_units_d, (out+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&hidden_delta_d, (hid+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&output_delta_d, (out+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&target_d, (out+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&input_weights_d, (in+1)*(hid+1)*sizeof(float*)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&hidden_weights_d, (hid+1)*(out+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&input_prev_weights_d, (in+1)*(hid+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&hidden_prev_weights_d, (hid+1)*(out+1)*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying neural network from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(input_units_d, net_h->input_units, (in+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(hidden_units_d, net_h->hidden_units, (hid+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(output_units_d, net_h->output_units, (out+1)*sizeof(float), 
cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(hidden_delta_d, net_h->hidden_delta, (hid+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(output_delta_d, net_h->output_delta, (out+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(target_d, net_h->target, (out+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(input_weights_d, net_h->input_weights, (in+1)*(hid+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(hidden_weights_d, net_h->hidden_weights, (hid+1)*(out+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(input_prev_weights_d, net_h->input_prev_weights, (in+1)*(hid+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemcpy(hidden_prev_weights_d, net_h->hidden_prev_weights, (hid+1)*(out+1)*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- //entering the training kernel, only one iteration printf("Starting training kernel\n"); startTime(&timer); bpnn_train_kernel_device(in, hid, out, input_units_d, hidden_units_d, output_units_d, hidden_delta_d, output_delta_d, target_d, input_weights_d, hidden_weights_d, input_prev_weights_d, hidden_prev_weights_d); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(net_h->hidden_weights, hidden_weights_d, (hid+1)*(out+1)*sizeof(float), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the host"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Save results to file out_gpu.txt ---------------------------------------- const char *file_gpu = "out_gpu.txt"; bpnn_save_dbg(net_h, file_gpu); // Verify correctness ----------------------------------------------------- // const chat *file_cpu = "../out.txt"; // compare_result(file_gpu, file_cpu); // Free memory ------------------------------------------------------------ bpnn_free(net_h); cudaFree(input_units_d); cudaFree(hidden_units_d); cudaFree(output_units_d); cudaFree(hidden_delta_d); cudaFree(output_delta_d); cudaFree(target_d); cudaFree(input_weights_d); cudaFree(hidden_weights_d); cudaFree(input_prev_weights_d); cudaFree(hidden_prev_weights_d); printf("Training done!\n\n"); return 0; }
875c8f8ad27fb734e8d212515e8a63a8d55f14d2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <math.h>
using namespace std;

#define blocks_num 384
#define thread_per_block 1024

// host function to check whether the number is prime
bool Is_Prime(unsigned long long int num)
{
    for (unsigned long long int i = 2; i <= sqrtf(num); ++i)
    {
        if (num % i == 0)
            return false;
    }
    return true;
}

// kernel to check whether the number is prime; each thread tests its own
// range of candidate divisors
__global__ void Check_Prime(unsigned long long int *d_number, bool *d_out, int *d_iteration)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int loop_begin = index * (*d_iteration);
    int loop_end = (index + 1) * (*d_iteration);
    if (index == 0)
        loop_begin += 2;

    for (long long i = loop_begin; i <= loop_end; i++)
    {
        if (*d_number % i == 0)
        {
            // found a divisor: not prime. *d_out is preset to true on the
            // host, so only false is ever written here (the original
            // "else { *d_out = true; }" raced with threads that had already
            // found a divisor).
            *d_out = false;
            return;
        }
    }
}

int main()
{
    float CPU_TIME;
    float GPU_TIME;

    cout << "\t\t\t*** CUDA TASK ***\n\t\t\t==================\n\n";
    cout << "Checking Numbers ... :\n----------------------\n";

    // host variables
    unsigned long long int number = 100000000000000003;
    bool out = true;  // result flag, preset to "prime" (was an uninitialized bool* before)
    // round up so the threads cover every candidate divisor up to sqrt(number)
    int iteration_per_thread = int(sqrtf(number) / (blocks_num * thread_per_block)) + 1;

    // device variables
    unsigned long long int *d_number;
    bool *d_out;
    int *d_iteration;

    // allocate device data
    hipMalloc((void **)&d_number, sizeof(unsigned long long int));
    hipMalloc((void **)&d_out, sizeof(bool));
    hipMalloc((void **)&d_iteration, sizeof(int));

    // copy data from host to device
    hipMemcpy(d_number, &number, sizeof(unsigned long long int), hipMemcpyHostToDevice);
    hipMemcpy(d_out, &out, sizeof(bool), hipMemcpyHostToDevice);
    hipMemcpy(d_iteration, &iteration_per_thread, sizeof(int), hipMemcpyHostToDevice);

    hipEvent_t start, stop;    // define 2 events
    hipEventCreate(&start);    // create start event
    hipEventCreate(&stop);     // create stop event
    hipEventRecord(start, 0);  // begin start event

    // call the Check_Prime kernel
    hipLaunchKernelGGL((Check_Prime), dim3(blocks_num), dim3(thread_per_block), 0, 0, d_number, d_out, d_iteration);

    hipEventRecord(stop, 0);   // record stop event
    hipEventSynchronize(stop);
    hipEventElapsedTime(&GPU_TIME, start, stop);  // calculate execution time

    // destroy 2 events
    hipEventDestroy(start);
    hipEventDestroy(stop);

    // copy the result back from device to host
    hipMemcpy(&out, d_out, sizeof(bool), hipMemcpyDeviceToHost);

    // print GPU data
    cout << "the number = " << number << " is ";
    if (out)
        cout << "prime\n";
    else
        cout << "not prime\n";
    cout << "GPU Time = " << GPU_TIME << endl << "--------------------\n\n";

    // sequential code
    unsigned long long int cpu_start = clock();  // cpu start time
    cout << "the number = " << number << " is ";
    if (Is_Prime(number))
        cout << "prime\n";
    else
        cout << "not prime\n";
    unsigned long long int cpu_stop = clock();   // cpu stop time
    CPU_TIME = float(cpu_stop - cpu_start);      // cpu execution time (clock ticks)
    cout << "CPU Time = " << CPU_TIME << endl << "--------------------\n";  // print CPU data

    // free allocated data on device
    hipFree(d_number);
    hipFree(d_out);
    hipFree(d_iteration);

    return 0;
}
875c8f8ad27fb734e8d212515e8a63a8d55f14d2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <time.h> #include <math.h> using namespace std; #define blocks_num 384 #define thread_per_block 1024 // function to check the num if prime can call from host and device bool Is_Prime(unsigned long long int num) { for (unsigned long long int i = 2; i <= sqrtf(num); ++i) { if (num % i == 0) return false; } return true; } // Create a kernel to check number if prime __global__ void Check_Prime(unsigned long long int *d_number,bool *d_out,int *d_iteration) { int index = threadIdx.x + blockIdx.x * blockDim.x; int loop_begin = index * (*d_iteration); int loop_end = (index + 1)* (*d_iteration); if(index == 0) loop_begin +=2; for(long long i = loop_begin; i<= loop_end ; i++) { if (*d_number % i == 0) { *d_out = false; return; } else { *d_out = true; } } } void main() { float CPU_TIME; float GPU_TIME; cout << "\t\t\t*** CUDA TASK ***\n\t\t\t==================\n\n"; cout << "Checking Numbers ... :\n----------------------\n"; // host variables unsigned long long int number = 100000000000000003; bool *out; int iteration_per_thread = int(sqrtf(number)/(blocks_num * thread_per_block)); // device var unsigned long long int *d_number; bool *d_out; int *d_iteration; // allocate device data cudaMalloc((void **)&d_number, sizeof(unsigned long long int)); cudaMalloc((void **)&d_out, sizeof(bool)); cudaMalloc((void **)&d_iteration, sizeof(int)); // copy data from host to device cudaMemcpy(d_number, &number, sizeof(unsigned long long int), cudaMemcpyHostToDevice); cudaMemcpy(d_iteration, &iteration_per_thread, sizeof(int), cudaMemcpyHostToDevice); cudaEvent_t start, stop; // define 2 events cudaEventCreate(&start); // create start event cudaEventCreate(&stop); // create stop event cudaEventRecord(start, 0); // begin start event // call check_prime kernal Check_Prime <<< blocks_num, thread_per_block >>> (d_number, d_out,d_iteration); cudaEventRecord(stop, 0); // begin stop event cudaEventSynchronize(stop); cudaEventElapsedTime(&GPU_TIME, start, stop); // calculate execution time // destroy 2 events cudaEventDestroy(start); cudaEventDestroy(stop); // copy data back from device to host cudaMemcpy(out, d_out, sizeof(bool), cudaMemcpyDeviceToHost); // print GPU data cout << "the number = "<<number<<" is "; if(*out == true) cout << "prime\n"; else cout << "not prime\n"; cout << "GPU Time = " << GPU_TIME << endl << "--------------------\n\n"; // sequential code unsigned long long int cpu_start = clock(); // cpu start time cout << "the number = "<<number<<" is "; if (Is_Prime(number)) cout << "prime\n"; else cout << "not prime\n"; unsigned long long int cpu_stop = clock(); // cpu stop time CPU_TIME = float(cpu_stop - cpu_start); // cpu execution time cout << "CPU Time = " << CPU_TIME << endl << "--------------------\n"; // print CPU data // free allocated data on device cudaFree(d_number); cudaFree(d_out); }
c5e5b3549d3492a25fe951b585f824e5604296db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel1_z_nonvector [5][2]; static int dims_advec_mom_kernel1_z_nonvector_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel1_z_nonvector_gpu(const ACC<double> &node_flux, const ACC<double> &node_mass_pre, ACC<double> &mom_flux, const ACC<double> &celldz, const ACC<double> &vel1) { double sigma, wind, width; double vdiffuw, vdiffdw, auw, adw, limiter; int upwind, donor, downwind, dif; double advec_vel_temp; if( (node_flux(0,0,0)) < 0.0) { upwind = 2; donor = 1; downwind = 0; dif = donor; } else { upwind = -1; donor = 0; downwind = 1; dif = upwind; } sigma = fabs(node_flux(0,0,0))/node_mass_pre(0,0,donor); width = celldz(0,0,0); vdiffuw = vel1(0,0,donor) - vel1(0,0,upwind); vdiffdw = vel1(0,0,downwind) - vel1(0,0,donor); limiter = 0.0; if(vdiffuw*vdiffdw > 0.0) { auw = fabs(vdiffuw); adw = fabs(vdiffdw); wind = 1.0; if(vdiffdw <= 0.0) wind = -1.0; limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldz(0,0,dif))/6.0,MIN(auw,adw)); } advec_vel_temp= vel1(0,0,donor) + (1.0 - sigma) * limiter; mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0); } __global__ void ops_advec_mom_kernel1_z_nonvector( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[0][0] * dims_advec_mom_kernel1_z_nonvector[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[1][0] * dims_advec_mom_kernel1_z_nonvector[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[2][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[2][0] * dims_advec_mom_kernel1_z_nonvector[2][1]; arg3 += idx_x * 0*1 + idx_y * 0*1 * dims_advec_mom_kernel1_z_nonvector[3][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[3][0] * dims_advec_mom_kernel1_z_nonvector[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[4][0] * dims_advec_mom_kernel1_z_nonvector[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { const ACC<double> argp0(dims_advec_mom_kernel1_z_nonvector[0][0], dims_advec_mom_kernel1_z_nonvector[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel1_z_nonvector[1][0], dims_advec_mom_kernel1_z_nonvector[1][1], arg1); ACC<double> argp2(dims_advec_mom_kernel1_z_nonvector[2][0], dims_advec_mom_kernel1_z_nonvector[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel1_z_nonvector[3][0], dims_advec_mom_kernel1_z_nonvector[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel1_z_nonvector[4][0], dims_advec_mom_kernel1_z_nonvector[4][1], arg4); advec_mom_kernel1_z_nonvector_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel1_z_nonvector_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block 
block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,136)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(136,"advec_mom_kernel1_z_nonvector"); OPS_kernels[136].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel1_z_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_z_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_z_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_z_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_z_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_z_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_z_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_z_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_z_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_z_nonvector_h[4][1]) { dims_advec_mom_kernel1_z_nonvector_h[0][0] = xdim0; dims_advec_mom_kernel1_z_nonvector_h[0][1] = ydim0; dims_advec_mom_kernel1_z_nonvector_h[1][0] = xdim1; dims_advec_mom_kernel1_z_nonvector_h[1][1] = ydim1; dims_advec_mom_kernel1_z_nonvector_h[2][0] = xdim2; dims_advec_mom_kernel1_z_nonvector_h[2][1] = ydim2; dims_advec_mom_kernel1_z_nonvector_h[3][0] = xdim3; dims_advec_mom_kernel1_z_nonvector_h[3][1] = ydim3; dims_advec_mom_kernel1_z_nonvector_h[4][0] = xdim4; dims_advec_mom_kernel1_z_nonvector_h[4][1] = ydim4; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel1_z_nonvector, dims_advec_mom_kernel1_z_nonvector_h, sizeof(dims_advec_mom_kernel1_z_nonvector))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[136].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel1_z_nonvector), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[136].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[136].mpi_time += t2-t1; OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 136; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 136; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } 
desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel1_z_nonvector_execute; if (OPS_diags > 1) { ops_timing_realloc(136,"advec_mom_kernel1_z_nonvector"); } ops_enqueue_kernel(desc); } #endif
c5e5b3549d3492a25fe951b585f824e5604296db.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel1_z_nonvector [5][2]; static int dims_advec_mom_kernel1_z_nonvector_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel1_z_nonvector_gpu(const ACC<double> &node_flux, const ACC<double> &node_mass_pre, ACC<double> &mom_flux, const ACC<double> &celldz, const ACC<double> &vel1) { double sigma, wind, width; double vdiffuw, vdiffdw, auw, adw, limiter; int upwind, donor, downwind, dif; double advec_vel_temp; if( (node_flux(0,0,0)) < 0.0) { upwind = 2; donor = 1; downwind = 0; dif = donor; } else { upwind = -1; donor = 0; downwind = 1; dif = upwind; } sigma = fabs(node_flux(0,0,0))/node_mass_pre(0,0,donor); width = celldz(0,0,0); vdiffuw = vel1(0,0,donor) - vel1(0,0,upwind); vdiffdw = vel1(0,0,downwind) - vel1(0,0,donor); limiter = 0.0; if(vdiffuw*vdiffdw > 0.0) { auw = fabs(vdiffuw); adw = fabs(vdiffdw); wind = 1.0; if(vdiffdw <= 0.0) wind = -1.0; limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldz(0,0,dif))/6.0,MIN(auw,adw)); } advec_vel_temp= vel1(0,0,donor) + (1.0 - sigma) * limiter; mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0); } __global__ void ops_advec_mom_kernel1_z_nonvector( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[0][0] * dims_advec_mom_kernel1_z_nonvector[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[1][0] * dims_advec_mom_kernel1_z_nonvector[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[2][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[2][0] * dims_advec_mom_kernel1_z_nonvector[2][1]; arg3 += idx_x * 0*1 + idx_y * 0*1 * dims_advec_mom_kernel1_z_nonvector[3][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[3][0] * dims_advec_mom_kernel1_z_nonvector[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_z_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_z_nonvector[4][0] * dims_advec_mom_kernel1_z_nonvector[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { const ACC<double> argp0(dims_advec_mom_kernel1_z_nonvector[0][0], dims_advec_mom_kernel1_z_nonvector[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel1_z_nonvector[1][0], dims_advec_mom_kernel1_z_nonvector[1][1], arg1); ACC<double> argp2(dims_advec_mom_kernel1_z_nonvector[2][0], dims_advec_mom_kernel1_z_nonvector[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel1_z_nonvector[3][0], dims_advec_mom_kernel1_z_nonvector[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel1_z_nonvector[4][0], dims_advec_mom_kernel1_z_nonvector[4][1], arg4); advec_mom_kernel1_z_nonvector_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel1_z_nonvector_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg 
arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,136)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(136,"advec_mom_kernel1_z_nonvector"); OPS_kernels[136].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel1_z_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_z_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_z_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_z_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_z_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_z_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_z_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_z_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_z_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_z_nonvector_h[4][1]) { dims_advec_mom_kernel1_z_nonvector_h[0][0] = xdim0; dims_advec_mom_kernel1_z_nonvector_h[0][1] = ydim0; dims_advec_mom_kernel1_z_nonvector_h[1][0] = xdim1; dims_advec_mom_kernel1_z_nonvector_h[1][1] = ydim1; dims_advec_mom_kernel1_z_nonvector_h[2][0] = xdim2; dims_advec_mom_kernel1_z_nonvector_h[2][1] = ydim2; dims_advec_mom_kernel1_z_nonvector_h[3][0] = xdim3; dims_advec_mom_kernel1_z_nonvector_h[3][1] = ydim3; dims_advec_mom_kernel1_z_nonvector_h[4][0] = xdim4; dims_advec_mom_kernel1_z_nonvector_h[4][1] = ydim4; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel1_z_nonvector, dims_advec_mom_kernel1_z_nonvector_h, sizeof(dims_advec_mom_kernel1_z_nonvector))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[136].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel1_z_nonvector<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[136].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[136].mpi_time += t2-t1; OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 136; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 136; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = 
(ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel1_z_nonvector_execute; if (OPS_diags > 1) { ops_timing_realloc(136,"advec_mom_kernel1_z_nonvector"); } ops_enqueue_kernel(desc); } #endif
df35867135821cf6fc8653647821e75ae97d37e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void dot_product(double* dot_ptr, const double* x, const double* y, long N){ double sum = 0; #pragma omp parallel for schedule(static) reduction(+:sum) for (long i = 0; i < N; i++) sum += x[i]*y[i]; *dot_ptr = sum; } void Check_CUDA_Error(const char *message){ hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 1024 __global__ void dotproduct_kernel2(double* sum, const double* a, const double*b, long N, int flag){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N && flag == 1) smem[threadIdx.x] = a[idx]*b[idx]; else if (idx < N && flag == 0) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } int main() { long N = (1UL<<25); double *x, *y; hipHostMalloc((void**)&x, N * sizeof(double)); hipHostMalloc((void**)&y, N * sizeof(double)); #pragma omp parallel for schedule(static) for (long i = 0; i < N; i++) { x[i] = 1.0/(i+1); y[i] = 1.0/(N - i); } double dot_ref, dot; double tt = omp_get_wtime(); dot_product(&dot_ref, x,y, N); printf("CPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *x_d, *y_d, *z_d; hipMalloc(&x_d, N*sizeof(double)); hipMalloc(&y_d, N*sizeof(double)); long N_work = 1; for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; hipMalloc(&z_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks hipMemcpyAsync(x_d, x, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpyAsync(y_d, y, N*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); tt = omp_get_wtime(); double* dot_d = z_d; long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); hipLaunchKernelGGL(( dotproduct_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, dot_d, x_d, y_d, N, 1); while (Nb > 1) { long N = Nb; Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE); hipLaunchKernelGGL(( dotproduct_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, dot_d + N, dot_d, y_d, N, 0); dot_d += N; } hipMemcpyAsync(&dot, dot_d, 1*sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); printf("Error = %f\n", fabs(dot-dot_ref)); hipFree(x_d); hipFree(y_d); hipFree(z_d); hipHostFree(x); hipHostFree(y); return 0; }
df35867135821cf6fc8653647821e75ae97d37e1.cu
#include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void dot_product(double* dot_ptr, const double* x, const double* y, long N){ double sum = 0; #pragma omp parallel for schedule(static) reduction(+:sum) for (long i = 0; i < N; i++) sum += x[i]*y[i]; *dot_ptr = sum; } void Check_CUDA_Error(const char *message){ cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 1024 __global__ void dotproduct_kernel2(double* sum, const double* a, const double*b, long N, int flag){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N && flag == 1) smem[threadIdx.x] = a[idx]*b[idx]; else if (idx < N && flag == 0) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } int main() { long N = (1UL<<25); double *x, *y; cudaMallocHost((void**)&x, N * sizeof(double)); cudaMallocHost((void**)&y, N * sizeof(double)); #pragma omp parallel for schedule(static) for (long i = 0; i < N; i++) { x[i] = 1.0/(i+1); y[i] = 1.0/(N - i); } double dot_ref, dot; double tt = omp_get_wtime(); dot_product(&dot_ref, x,y, N); printf("CPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *x_d, *y_d, *z_d; cudaMalloc(&x_d, N*sizeof(double)); cudaMalloc(&y_d, N*sizeof(double)); long N_work = 1; for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; cudaMalloc(&z_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks cudaMemcpyAsync(x_d, x, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyAsync(y_d, y, N*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); tt = omp_get_wtime(); double* dot_d = z_d; long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); dotproduct_kernel2<<<Nb,BLOCK_SIZE>>>(dot_d, x_d, y_d, N, 1); while (Nb > 1) { long N = Nb; Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE); dotproduct_kernel2<<<Nb,BLOCK_SIZE>>>(dot_d + N, dot_d, y_d, N, 0); dot_d += N; } cudaMemcpyAsync(&dot, dot_d, 1*sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); printf("Error = %f\n", fabs(dot-dot_ref)); cudaFree(x_d); cudaFree(y_d); cudaFree(z_d); cudaFreeHost(x); cudaFreeHost(y); return 0; }
ebf4eec71bd44b933bfd1cd71f80f5c68f55ee1b.hip
// !!! This is a file automatically generated by hipify!!! #ifndef QC_CU #define QC_CU extern "C" { #include "bam_coverage.h" #include "bam_data_batch.h" #include "bam_data_batch_list.h" #include "bam_qc_batch.h" #include "bam_qc_report.h" #include "bam_reader.h" #include "commons.h" #include "file_utils.h" #include "gff_data.h" #include "gff_reader.h" #include "log.h" #include "qc.h" #include "qc_kernel_omp.h" #include "sam.h" #include "system_utils.h" } #include "qc_kernel_cuda.h" /* ****************************************************** * Private thread functions * * *****************************************************/ void* qc_calc_server(void* params_p); void* cpus_server(void* params_p); void* results_server(void* params_p); /* ********************************************** * Global variables * * *********************************************/ list_t bam_qc_batch_list; int bam_batch_reader_alive = 1; int gpus_thread_alive = 1; int cpus_thread_alive = 1; pthread_mutex_t gpus_thread_alive_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t cpus_thread_alive_lock = PTHREAD_MUTEX_INITIALIZER; /* ********************************************** * Extern calls * * *********************************************/ extern void call_kernel_basic_stats(dim3 dimGrid, dim3 dimBlock, bam_data_core_t* d_core_data_p, int* d_strand_counter_p, int* d_alignment_length_p, int* d_map_quality_p, int num_alignments); extern void call_kernel_map_errors(dim3 dimGrid, dim3 dimBlock, bam_data_core_t* d_core_data_p, uint32_t* d_cigar_data_p, qc_alignment_t* d_qc_alignment_p, int num_alignments); /* ********************************************************************** * Private thread functions implementations * * *********************************************************************/ /* ****************************************************** * QC calc server thread * * *****************************************************/ void* qc_calc_server(void* params_p) { LOG_DEBUG("Thread-GPU: START\n"); if (time_flag) { start_timer(t1_qc_calc_server); } qc_calc_server_input_t* input_p = (qc_calc_server_input_t*) params_p; int cpu_num_threads = input_p->cpu_num_threads; list_item_t* bam_data_batch_list_item_p = NULL; list_t* gpu_batch_list_p = input_p->gpu_batch_list_p; list_t* cpu_batch_list_p = input_p->cpu_batch_list_p; bam_data_batch_t* bam_data_batch_p = NULL; bam_qc_batch_t* bam_qc_batch_p = NULL; // variables for store output results in both CPU and GPU qc_alignment_t* qc_alignment_p; int* strand_counter_p; int* map_quality_p; int* alignment_length_p; bam_data_core_t* d_core_data_p; uint32_t* d_cigar_data_p; qc_alignment_t* d_qc_alignment_p; int* d_strand_counter_p; int* d_map_quality_p; int* d_alignment_length_p; // selecting GPU device CUDA_SAFE_CALL(hipSetDevice(input_p->gpu_device_id[0])); int reads_alive; reads_alive = list_get_writers(gpu_batch_list_p); while ((bam_data_batch_list_item_p = list_remove_item(gpu_batch_list_p)) != NULL) { LOG_DEBUG("Thread-GPU: waiting for batch....\n"); //if (time_flag) { // start_timer(t1_gpu); //} if ((time_flag) && (gpus_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_gpus, gpus_standby_time); } char log_message[50]; sprintf(log_message, "Thread-GPU: processing for batch %i....\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); number_of_batchs++; // allocation memory for output results bam_data_batch_p = (bam_data_batch_t*) bam_data_batch_list_item_p->data_p; int num_alignments = bam_data_batch_p->num_alignments; int num_blocks; //cpu_num_threads = 1; if 
(cpu_num_threads == 0) { // GPU implementation num_blocks = (num_alignments / input_p->gpu_num_threads) + 1; dim3 dimBlock(input_p->gpu_num_threads, 1, 1); dim3 dimGrid(num_blocks, 1, 1); strand_counter_p = (int*) calloc(num_blocks, sizeof(int)); map_quality_p = (int*) calloc(num_blocks, sizeof(int)); alignment_length_p = (int*) calloc(num_blocks, sizeof(int)); qc_alignment_p = (qc_alignment_t*) calloc(bam_data_batch_p->num_alignments, sizeof(qc_alignment_t)); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_core_data_p, (unsigned int)(num_alignments + 1) * sizeof(bam_data_core_t), 0) ); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_strand_counter_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_map_quality_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_alignment_length_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_cigar_data_p, (unsigned int) bam_data_batch_p->num_cigar_operations * sizeof(uint32_t), 0) ); CUDA_SAFE_CALL( hipHostMalloc((void**) &d_qc_alignment_p, (unsigned int) num_alignments * sizeof(qc_alignment_t), 0) ); CUDA_SAFE_CALL( hipMemcpy(d_core_data_p, bam_data_batch_p->core_data_p, (num_alignments + 1) * sizeof(bam_data_core_t), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(d_cigar_data_p, bam_data_batch_p->cigar_data_p, bam_data_batch_p->num_cigar_operations * sizeof(uint32_t), hipMemcpyHostToDevice) ); CUDA_START_TIMER(); call_kernel_basic_stats(dimGrid, dimBlock, d_core_data_p, d_strand_counter_p, d_map_quality_p, d_alignment_length_p, num_alignments); call_kernel_map_errors(dimGrid, dimBlock, d_core_data_p, d_cigar_data_p, d_qc_alignment_p, num_alignments); CUDA_STOP_TIMER(); // copy result from GPU (GPU -> CPU) CUDA_SAFE_CALL( hipMemcpy(strand_counter_p, d_strand_counter_p, num_blocks * sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(map_quality_p, d_map_quality_p, num_blocks * sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(alignment_length_p, d_alignment_length_p, num_blocks * sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(qc_alignment_p, d_qc_alignment_p, num_alignments * sizeof(qc_alignment_t), hipMemcpyDeviceToHost) ); // free device memory CUDA_SAFE_CALL( hipHostFree(d_core_data_p) ); CUDA_SAFE_CALL( hipHostFree(d_cigar_data_p) ); CUDA_SAFE_CALL( hipHostFree(d_strand_counter_p) ); CUDA_SAFE_CALL( hipHostFree(d_map_quality_p) ); CUDA_SAFE_CALL( hipHostFree(d_qc_alignment_p) ); } else { // accumulation of partial results is only made once num_blocks = 1; strand_counter_p = (int*) calloc(num_blocks, sizeof(int)); map_quality_p = (int*) calloc(num_blocks, sizeof(int)); alignment_length_p = (int*) calloc(num_blocks, sizeof(int)); qc_alignment_p = (qc_alignment_t*) calloc(bam_data_batch_p->num_alignments, sizeof(qc_alignment_t)); if (time_flag) { start_timer(t1_gpu); } cpu_bam_qc_basic_stats(bam_data_batch_p->core_data_p, strand_counter_p, map_quality_p, alignment_length_p, num_alignments, cpu_num_threads); cpu_bam_qc_map_errors(bam_data_batch_p->core_data_p, bam_data_batch_p->cigar_data_p, qc_alignment_p, num_alignments); if (time_flag) { stop_timer(t1_gpu, t2_gpu, gpu_time); } } //if (time_flag) { // stop_timer(t1_gpu, t2_gpu, gpu_time); //} // create a new qc_batch object bam_qc_batch_p = (bam_qc_batch_t*) malloc(sizeof(bam_qc_batch_t)); bam_qc_batch_p->id = bam_data_batch_list_item_p->id; bam_qc_batch_p->num_alignments = bam_data_batch_p->num_alignments; bam_qc_batch_p->num_blocks = 
num_blocks; bam_qc_batch_p->qc_alignment_p = qc_alignment_p; bam_qc_batch_p->strand_counter_p = strand_counter_p; bam_qc_batch_p->map_quality_p = map_quality_p; bam_qc_batch_p->alignment_length_p = alignment_length_p; //bam_qc_batch_p->alignments_p = bam_data_batch_list_item_p->alignments_p; // and insert it into the bam_qc_batch_list list_item_t* item_p = list_item_new(bam_qc_batch_p->id, 0, bam_qc_batch_p); list_insert_item(item_p, &bam_qc_batch_list); // copy the the current batch item to the cpu batch list in order to perform CPU qc operations list_insert_item(bam_data_batch_list_item_p, cpu_batch_list_p); sprintf(log_message, "Thread-GPU:...processing for batch %i done !\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); //if (time_flag) { stop_timer(t1_gpu, t2_gpu, gpu_time); } } // end of external while loop pthread_mutex_lock(&gpus_thread_alive_lock); gpus_thread_alive--; pthread_mutex_unlock(&gpus_thread_alive_lock); list_decr_writers(cpu_batch_list_p); list_decr_writers(&bam_qc_batch_list); if (time_flag) { stop_timer(t1_qc_calc_server, t2_qc_calc_server, qc_calc_server_time); } LOG_DEBUG("Thread-GPU: END\n"); // exiting... pthread_exit(0); } /* ****************************************************** * QC calc server thread * * *****************************************************/ void* cpus_server(void* params_p) { double coverage_time = 0.0; struct timeval t1_coverage, t2_coverage; LOG_DEBUG("Thread-CPU: START\n"); if (time_flag) { start_timer(t1_cpus_server); } //initialize str_coverage_matrix str_coverage_matrix_init(); cpus_server_input_t* input_p = (cpus_server_input_t*) params_p; qc_mapping_counter_t* qc_mapping_counter_p = (qc_mapping_counter_t*) input_p->qc_mapping_counter; int max_distance_size = input_p->max_distance_size; int cpu_num_threads = input_p->cpu_num_threads; bam_data_batch_t* bam_data_batch_p = NULL; list_item_t* bam_data_batch_list_item_p = NULL; list_t* cpu_batch_list_p = input_p->cpu_batch_list_p; char* gff_filename = input_p->gff_filename; char* output_directory = input_p->output_directory; char* input_filename = input_p->input_filename; // delete previous coverage file to append new data bam_coverage_counter_delete_file(output_directory, input_filename); // creating trie //cp_trie* test_trie = cp_trie_create(0); sqlite3* db; sqlite3_stmt* insert_complete_mapping_stmt; qc_hash_t* qc_hash_p; if (input_p->disk_flag) { //disk implementation: sqlite // create tables mapping_db_create_complete_mappings_database(&db, (const char*) input_p->output_directory, 0); // create and prepare queries insert_complete_mapping_stmt = mapping_db_prepare_insert_complete(db); // start transaction mapping_db_begin_transaction(db); } else { //memory implementation: custom hash table // variables for store intermediate and output results in both CPU and GPU qc_hash_p = (qc_hash_t*) qc_hash_new(QC_HASH_LENGTH); } // variables for coverage (regions data) bam_chromosome_coverage_t bam_chromosome_coverage[num_of_chromosomes]; for (int j = 0; j < num_of_chromosomes; j++) { bam_chromosome_coverage_init(&bam_chromosome_coverage[j]); } gff_data_t* gff_data_p = gff_data_new(gff_filename); int gpus_alive; gpus_alive = list_get_writers(cpu_batch_list_p); while ((bam_data_batch_list_item_p = list_remove_item(cpu_batch_list_p)) != NULL) { if ((time_flag) && (cpus_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_cpus, cpus_standby_time); } char log_message[50]; sprintf(log_message, "Thread-CPU: processing for batch %i....\n", bam_data_batch_list_item_p->id); 
LOG_DEBUG(log_message); // allocation memory for output results bam_data_batch_p = (bam_data_batch_t*) bam_data_batch_list_item_p->data_p; int num_alignments = bam_data_batch_p->num_alignments; int cpu_num_threads = input_p->cpu_num_threads; if (time_flag) { start_timer(t1_cpu); } char* id_seq; int tid, mtid, isize, start_coordinate, seq_length; short int paired_end; bam_data_core_t* core_data_p; if (input_p->disk_flag) { // duplicate code for performance, disk implementation for (int i = 0; i < bam_data_batch_p->num_alignments; i++) { id_seq = &(bam_data_batch_p->id_seq_data_p[bam_data_batch_p->core_data_p[i].id_seq_index]); core_data_p = &(bam_data_batch_p->core_data_p[i]); tid = core_data_p->chromosome; mtid = core_data_p->mate_chromosome; isize = core_data_p->isize; start_coordinate = core_data_p->start_coordinate; seq_length = core_data_p->alignment_length; paired_end = core_data_p->paired_end; // insert in SQLite mapping_db_insert_complete(db, id_seq, paired_end, tid, mtid, isize, start_coordinate, seq_length, insert_complete_mapping_stmt); } } else { // memory implementation for (int i = 0; i < bam_data_batch_p->num_alignments; i++) { id_seq = &(bam_data_batch_p->id_seq_data_p[bam_data_batch_p->core_data_p[i].id_seq_index]); core_data_p = &(bam_data_batch_p->core_data_p[i]); tid = core_data_p->chromosome; start_coordinate = core_data_p->start_coordinate; seq_length = core_data_p->alignment_length; paired_end = core_data_p->paired_end; // insert in qc hash qc_hash_insert_alignment(qc_hash_p, id_seq, tid, start_coordinate, seq_length, paired_end); // insert in trie structure //cp_trie_add(test_trie, id_seq, NULL); } } if (gff_data_batch_in_region(bam_data_batch_p, gff_data_p) != 0) { bam_coverage_compute(bam_data_batch_p, bam_chromosome_coverage, gff_data_p, output_directory, input_filename, cpu_num_threads); } if (time_flag) { stop_timer(t1_cpu, t2_cpu, cpu_time); } sprintf(log_message, "Thread-CPU:...processing for batch %i done !\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); // free the current batch item if all processing with the batch is performed bam_data_batch_free((bam_data_batch_t*) bam_data_batch_list_item_p->data_p); list_item_free(bam_data_batch_list_item_p); // ask again for reads server status gpus_alive = list_get_writers(cpu_batch_list_p); } // end of external while loop // print the last counters bam_coverage_counter_mark_to_print(bam_chromosome_coverage, true); bam_coverage_counter_print(bam_chromosome_coverage, output_directory, input_filename); //qc_hash_list_print(qc_hash_p->qc_hash_list_p); //calculate over the qc hash table to obtain: // - Mean distance between paired ends // - Histogram of mappings per reads unsigned long mean_paired_end_distance = 0; if (!input_p->disk_flag) { if (time_flag) { start_timer(t1_cpu); } qc_hash_perform_calculations(qc_hash_p, qc_mapping_counter_p, &mean_paired_end_distance, max_distance_size, cpu_num_threads); qc_hash_free(qc_hash_p, true); if (time_flag) { stop_timer(t1_cpu, t2_cpu, cpu_time); } } if (input_p->disk_flag) { if (time_flag) { start_timer(t1_db); } mapping_db_end_transaction(db); //sqlite3_finalize(insert_complete_mapping_stmt); //mapping_db_create_indexes(&db); mapping_db_perform_calculations(db, max_distance_size, qc_mapping_counter_p->num_mappings_histogram, &mean_paired_end_distance); mapping_db_close(db); if (time_flag) { stop_timer(t1_db, t2_db, db_time); } } qc_mapping_counter_p->mean_paired_end_distance = mean_paired_end_distance; //free qc hash structure, gff data and chromosome coverage for 
(int j = 0; j < num_of_chromosomes; j++) { bam_chromosome_coverage_clear(&bam_chromosome_coverage[j]); } //qc_hash_free(qc_hash_p, true); gff_data_free(gff_data_p); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_thread_alive--; pthread_mutex_unlock(&cpus_thread_alive_lock); if (time_flag) { stop_timer(t1_cpus_server, t2_cpus_server, cpus_server_time); } // --------------- D E B U G ---------------- printf("--------------- D E B U G ----------------\n"); for (int i = 0; i <= (MAX_MAPPING_COUNT_IN_HISTOGRAM + 1); i++) { printf("qc_mapping_counter_p->num_mappings_histogram[%i]: %i\n", i, qc_mapping_counter_p->num_mappings_histogram[i]); } printf("mean_paired_end_distance: %ld\n\n", qc_mapping_counter_p->mean_paired_end_distance); printf("--------------- D E B U G ----------------\n"); // --------------- D E B U G ---------------- LOG_DEBUG("Thread-CPU: END\n"); // exiting... pthread_exit(0); } /* ****************************************************** * Results server thread * * *****************************************************/ void* results_server(void* params_p) { LOG_DEBUG("Thread-RESULTS: START\n"); if (time_flag) { start_timer(t1_results_server); } results_server_input_t* input_p = (results_server_input_t*) params_p; // variables for storing qc report information bam_qc_report_t bam_qc_report; memset(&bam_qc_report, 0, sizeof(bam_qc_report_t)); qc_mapping_counter_t* qc_mapping_counter_p = (qc_mapping_counter_t*) input_p->qc_mapping_counter; int nb_total_threads = input_p->gpu_num_blocks * input_p->gpu_num_threads; int base_quality = input_p->base_quality; int i, alignments; // go through the results_batch list, and process it // take and remove the first item, and so on... list_item_t* item_p = NULL; bam_qc_batch_t* bam_qc_batch_p = NULL; int gpus_alive, cpus_alive; // getting gpus thread status pthread_mutex_lock(&gpus_thread_alive_lock); gpus_alive = gpus_thread_alive; pthread_mutex_unlock(&gpus_thread_alive_lock); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); //iteration until not NULL is returned, then process batch while ((item_p = list_remove_item(&bam_qc_batch_list)) != NULL) { bam_qc_batch_p = (bam_qc_batch_t*) item_p->data_p; list_item_free(item_p); //printf("while... gpus_alive: %i, cpus_alive: %i, bam_qc_batch_p is NULL: %i\n", gpus_alive, cpus_alive, (bam_qc_batch_p == NULL) ? 1:0); if ((time_flag) && (results_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_results, results_standby_time); } if (time_flag) { start_timer(t1_result); } char log_message[50]; sprintf(log_message, "Thread-RESULTS: processing for bam batch %i....\n", bam_qc_batch_p->id); LOG_DEBUG(log_message); // result processing batch per batch alignments = bam_qc_batch_p->num_alignments; bam_qc_report.num_alignments += alignments; for (int k = 0; k < bam_qc_batch_p->num_blocks; k++) { bam_qc_report.strand_counter += bam_qc_batch_p->strand_counter_p[k]; bam_qc_report.mean_map_quality += bam_qc_batch_p->map_quality_p[k]; bam_qc_report.mean_alignment_length += bam_qc_batch_p->alignment_length_p[k]; } for (int k = 0; k < bam_qc_batch_p->num_alignments; k++) { bam_qc_report.map_error_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[MISMATCHES] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[MISMATCHES] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_deletion_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[D] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? 
bam_qc_batch_p->qc_alignment_p[k].counters[D] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_insertion_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[I] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[I] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_matching_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[EQUAL] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[EQUAL] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; } sprintf(log_message, "Thread-RESULTS: ....processing for batch %i done !\n", bam_qc_batch_p->id); LOG_DEBUG(log_message); // free ALL memory bam_qc_batch_free(bam_qc_batch_p, false); if (time_flag) { stop_timer(t1_result, t2_result, result_time); } // getting gpus and cpus thread status pthread_mutex_lock(&gpus_thread_alive_lock); gpus_alive = gpus_thread_alive; pthread_mutex_unlock(&gpus_thread_alive_lock); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); } // end of batch loop printf("bam_qc_report.num_alignments: %lu, strand (+): %lu, strand (-): %lu\n", bam_qc_report.num_alignments, bam_qc_report.strand_counter, (bam_qc_report.num_alignments - bam_qc_report.strand_counter)); if (time_flag) { start_timer(t1_result); } //calculate mean quality and mean length per alignment if (bam_qc_report.num_alignments > 0) { printf("bam_qc_report.mean_read_quality: %lu, num_alignments: %lu, mean_quality: %lu\n", bam_qc_report.mean_map_quality, bam_qc_report.num_alignments, (bam_qc_report.mean_map_quality / bam_qc_report.num_alignments)); printf("bam_qc_report.mean_alignment_length: %lu, num_alignments: %lu, mean_alignment_length: %lu\n", bam_qc_report.mean_alignment_length, bam_qc_report.num_alignments, (bam_qc_report.mean_alignment_length / bam_qc_report.num_alignments)); bam_qc_report.mean_map_quality /= bam_qc_report.num_alignments; bam_qc_report.mean_alignment_length /= bam_qc_report.num_alignments; } else { printf("bam_qc_report.mean_read_quality: %lu, num_alignments: %lu, mean_quality: 0\n", bam_qc_report.mean_map_quality, bam_qc_report.num_alignments); printf("bam_qc_report.mean_alignment_length: %lu, num_alignments: %lu, mean_alignment_length: 0\n", bam_qc_report.mean_alignment_length, bam_qc_report.num_alignments); bam_qc_report.mean_map_quality = 0; bam_qc_report.mean_alignment_length = 0; } if (time_flag) { stop_timer(t1_result, t2_result, result_time); } // and finally, print qc report, data files and graphs // when cpu data is ready (cpus_alive = 0) while (cpus_alive > 0) { sched_yield(); usleep(10000); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); } if (time_flag) { start_timer(t1_result); } bam_qc_report.num_mappings_histogram = qc_mapping_counter_p->num_mappings_histogram; bam_qc_report.mean_paired_end_distance = qc_mapping_counter_p->mean_paired_end_distance; if (time_flag) { stop_timer(t1_result, t2_result, result_time); } if (time_flag) { start_timer(t1_reporting); } generate_report(bam_qc_report, input_p->filename, input_p->base_quality, input_p->report_directory, 1); if (time_flag) { stop_timer(t1_reporting, t2_reporting, reporting_time); } if (time_flag) { stop_timer(t1_results_server, t2_results_server, results_server_time); } LOG_DEBUG("Thread-RESULTS: END\n"); // exiting... 
pthread_exit(0); } /* ************************************************************** * Public functions implementations * * *************************************************************/ void qc_bam_file(size_t batch_size, int batch_list_size, int gpu_num_threads, int gpu_num_blocks, int cpu_num_threads, int base_quality, int max_distance_size, char* input_filename, char* output_directory, char* gff_filename, int disk_flag) { // number of GPUs is obtained, and initializes the number of GPU threads 'alive' int num_gpu_devices; hipError_t cudaResultCode = hipGetDeviceCount(&num_gpu_devices); if (cudaResultCode != hipSuccess) { num_gpu_devices = 0; } gpus_thread_alive = num_gpu_devices; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); if (!prop.canMapHostMemory) { LOG_FATAL("device does not support MapHostMemory\n"); } //initializing bam_data_batch_list_gpu, bam_data_batch_list_cpu and bam_qc_batch_list list_t bam_data_batch_list_gpu; list_t bam_data_batch_list_cpu; if (num_gpu_devices > 0) { list_init("bam_data_batch_list_cpu", num_gpu_devices, batch_list_size, &bam_data_batch_list_cpu); } else { list_init("bam_data_batch_list_cpu", 1, batch_list_size, &bam_data_batch_list_cpu); } list_init("bam_data_batch_list_gpu", 1, batch_list_size, &bam_data_batch_list_gpu); list_init("bam_qc_batch_list", ((num_gpu_devices > 0) ? num_gpu_devices : 1), batch_list_size, &bam_qc_batch_list); //initializing qc_mapping_counter qc_mapping_counter_t qc_mapping_counter; qc_mapping_counter_init(&qc_mapping_counter); //multi-threads bam_reader_t* bam_reader_p = bam_reader_by_batch_new(input_filename, batch_size, base_quality, &bam_data_batch_list_gpu, LIST_INSERT_MODE); bam_reader_start(bam_reader_p); //some local variables void* r; // multi-threads pthread_t* qc_calc_server_thread_p = (pthread_t*) malloc(((num_gpu_devices == 0) ? 
1 : num_gpu_devices) * sizeof(pthread_t)); pthread_t* cpus_server_thread_p = (pthread_t*) malloc(cpu_num_threads * sizeof(pthread_t)); pthread_t results_server_thread; //calling GPU threads to process the bam data, //but first, prepare input parameter int i; qc_calc_server_input_t** qc_calc_server_input_p = (qc_calc_server_input_t**) calloc(num_gpu_devices, sizeof(qc_calc_server_input_t*)); if (num_gpu_devices > 0) { for (i = 0; i < num_gpu_devices; i++) { qc_calc_server_input_p[i] = (qc_calc_server_input_t*) calloc(1, sizeof(qc_calc_server_input_t)); } } else { qc_calc_server_input_p[0] = (qc_calc_server_input_t*) calloc(1, sizeof(qc_calc_server_input_t)); } if (num_gpu_devices > 0) { //GPU implementacion for (i = 0; i < num_gpu_devices; i++) { qc_calc_server_input_p[i]->num_gpu_devices = num_gpu_devices; qc_calc_server_input_p[i]->cpu_num_threads = 0; qc_calc_server_input_p[i]->gpu_device_id[0] = i; qc_calc_server_input_p[i]->gpu_num_blocks = gpu_num_blocks; qc_calc_server_input_p[i]->gpu_num_threads = gpu_num_threads; qc_calc_server_input_p[i]->gpu_batch_list_p = &bam_data_batch_list_gpu; qc_calc_server_input_p[i]->cpu_batch_list_p = &bam_data_batch_list_cpu; pthread_create(&qc_calc_server_thread_p[i], NULL, qc_calc_server, (void*) qc_calc_server_input_p[i]); } } else { //CPU implementacion qc_calc_server_input_p[0]->num_gpu_devices = 0; qc_calc_server_input_p[0]->cpu_num_threads = cpu_num_threads; qc_calc_server_input_p[0]->gpu_device_id[0] = 0; qc_calc_server_input_p[0]->gpu_num_blocks = 0; qc_calc_server_input_p[0]->gpu_num_threads = 0; qc_calc_server_input_p[0]->gpu_batch_list_p = &bam_data_batch_list_gpu; qc_calc_server_input_p[0]->cpu_batch_list_p = &bam_data_batch_list_cpu; pthread_create(&qc_calc_server_thread_p[0], NULL, qc_calc_server, (void*) &qc_calc_server_input_p[0]); } // calling CPU threads to process the bam data, cpus_server_input_t* cpus_server_input_p = (cpus_server_input_t*) calloc(1, sizeof(cpus_server_input_t)); for (i = 0; i < 1; i++) { cpus_server_input_p->cpu_num_threads = cpu_num_threads; cpus_server_input_p->max_distance_size = max_distance_size; cpus_server_input_p->cpu_batch_list_p = &bam_data_batch_list_cpu; cpus_server_input_p->qc_mapping_counter = &qc_mapping_counter; cpus_server_input_p->gff_filename = gff_filename; cpus_server_input_p->output_directory = output_directory; cpus_server_input_p->input_filename = input_filename; cpus_server_input_p->disk_flag = disk_flag; pthread_create(&cpus_server_thread_p[i], NULL, cpus_server, (void*) cpus_server_input_p); } //calling thread to process results from GPU, results_server_input_t results_server_input; results_server_input.gpu_num_blocks = gpu_num_blocks; results_server_input.gpu_num_threads = gpu_num_threads; results_server_input.base_quality = base_quality; results_server_input.qc_mapping_counter = &qc_mapping_counter; results_server_input.filename = input_filename; results_server_input.report_directory = output_directory; pthread_create(&results_server_thread, NULL, results_server, (void*) &results_server_input); num_alignments = bam_reader_join(bam_reader_p); for (int i = 0; i < num_gpu_devices; i++) { pthread_join(qc_calc_server_thread_p[i], &r); } free(qc_calc_server_thread_p); for (int i = 0; i < 1; i++) { pthread_join(cpus_server_thread_p[i], &r); } free(cpus_server_thread_p); pthread_join(results_server_thread, &r); //free thread stuff and parameters if (num_gpu_devices > 0) { for (i = 0; i < num_gpu_devices; i++) { free(qc_calc_server_input_p[i]); } } else { free(qc_calc_server_input_p[0]); } 
free(qc_calc_server_input_p); free(cpus_server_input_p); bam_reader_free(bam_reader_p); } #endif /* QC_CU */
ebf4eec71bd44b933bfd1cd71f80f5c68f55ee1b.cu
#ifndef QC_CU #define QC_CU extern "C" { #include "bam_coverage.h" #include "bam_data_batch.h" #include "bam_data_batch_list.h" #include "bam_qc_batch.h" #include "bam_qc_report.h" #include "bam_reader.h" #include "commons.h" #include "file_utils.h" #include "gff_data.h" #include "gff_reader.h" #include "log.h" #include "qc.h" #include "qc_kernel_omp.h" #include "sam.h" #include "system_utils.h" } #include "qc_kernel_cuda.h" /* ****************************************************** * Private thread functions * * *****************************************************/ void* qc_calc_server(void* params_p); void* cpus_server(void* params_p); void* results_server(void* params_p); /* ********************************************** * Global variables * * *********************************************/ list_t bam_qc_batch_list; int bam_batch_reader_alive = 1; int gpus_thread_alive = 1; int cpus_thread_alive = 1; pthread_mutex_t gpus_thread_alive_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t cpus_thread_alive_lock = PTHREAD_MUTEX_INITIALIZER; /* ********************************************** * Extern calls * * *********************************************/ extern void call_kernel_basic_stats(dim3 dimGrid, dim3 dimBlock, bam_data_core_t* d_core_data_p, int* d_strand_counter_p, int* d_alignment_length_p, int* d_map_quality_p, int num_alignments); extern void call_kernel_map_errors(dim3 dimGrid, dim3 dimBlock, bam_data_core_t* d_core_data_p, uint32_t* d_cigar_data_p, qc_alignment_t* d_qc_alignment_p, int num_alignments); /* ********************************************************************** * Private thread functions implementations * * *********************************************************************/ /* ****************************************************** * QC calc server thread * * *****************************************************/ void* qc_calc_server(void* params_p) { LOG_DEBUG("Thread-GPU: START\n"); if (time_flag) { start_timer(t1_qc_calc_server); } qc_calc_server_input_t* input_p = (qc_calc_server_input_t*) params_p; int cpu_num_threads = input_p->cpu_num_threads; list_item_t* bam_data_batch_list_item_p = NULL; list_t* gpu_batch_list_p = input_p->gpu_batch_list_p; list_t* cpu_batch_list_p = input_p->cpu_batch_list_p; bam_data_batch_t* bam_data_batch_p = NULL; bam_qc_batch_t* bam_qc_batch_p = NULL; // variables for store output results in both CPU and GPU qc_alignment_t* qc_alignment_p; int* strand_counter_p; int* map_quality_p; int* alignment_length_p; bam_data_core_t* d_core_data_p; uint32_t* d_cigar_data_p; qc_alignment_t* d_qc_alignment_p; int* d_strand_counter_p; int* d_map_quality_p; int* d_alignment_length_p; // selecting GPU device CUDA_SAFE_CALL(cudaSetDevice(input_p->gpu_device_id[0])); int reads_alive; reads_alive = list_get_writers(gpu_batch_list_p); while ((bam_data_batch_list_item_p = list_remove_item(gpu_batch_list_p)) != NULL) { LOG_DEBUG("Thread-GPU: waiting for batch....\n"); //if (time_flag) { // start_timer(t1_gpu); //} if ((time_flag) && (gpus_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_gpus, gpus_standby_time); } char log_message[50]; sprintf(log_message, "Thread-GPU: processing for batch %i....\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); number_of_batchs++; // allocation memory for output results bam_data_batch_p = (bam_data_batch_t*) bam_data_batch_list_item_p->data_p; int num_alignments = bam_data_batch_p->num_alignments; int num_blocks; //cpu_num_threads = 1; if (cpu_num_threads == 0) { // GPU implementation num_blocks 
= (num_alignments / input_p->gpu_num_threads) + 1; dim3 dimBlock(input_p->gpu_num_threads, 1, 1); dim3 dimGrid(num_blocks, 1, 1); strand_counter_p = (int*) calloc(num_blocks, sizeof(int)); map_quality_p = (int*) calloc(num_blocks, sizeof(int)); alignment_length_p = (int*) calloc(num_blocks, sizeof(int)); qc_alignment_p = (qc_alignment_t*) calloc(bam_data_batch_p->num_alignments, sizeof(qc_alignment_t)); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_core_data_p, (unsigned int)(num_alignments + 1) * sizeof(bam_data_core_t), 0) ); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_strand_counter_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_map_quality_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_alignment_length_p, (unsigned int) num_blocks * sizeof(int), 0) ); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_cigar_data_p, (unsigned int) bam_data_batch_p->num_cigar_operations * sizeof(uint32_t), 0) ); CUDA_SAFE_CALL( cudaHostAlloc((void**) &d_qc_alignment_p, (unsigned int) num_alignments * sizeof(qc_alignment_t), 0) ); CUDA_SAFE_CALL( cudaMemcpy(d_core_data_p, bam_data_batch_p->core_data_p, (num_alignments + 1) * sizeof(bam_data_core_t), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(d_cigar_data_p, bam_data_batch_p->cigar_data_p, bam_data_batch_p->num_cigar_operations * sizeof(uint32_t), cudaMemcpyHostToDevice) ); CUDA_START_TIMER(); call_kernel_basic_stats(dimGrid, dimBlock, d_core_data_p, d_strand_counter_p, d_map_quality_p, d_alignment_length_p, num_alignments); call_kernel_map_errors(dimGrid, dimBlock, d_core_data_p, d_cigar_data_p, d_qc_alignment_p, num_alignments); CUDA_STOP_TIMER(); // copy result from GPU (GPU -> CPU) CUDA_SAFE_CALL( cudaMemcpy(strand_counter_p, d_strand_counter_p, num_blocks * sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(map_quality_p, d_map_quality_p, num_blocks * sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(alignment_length_p, d_alignment_length_p, num_blocks * sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(qc_alignment_p, d_qc_alignment_p, num_alignments * sizeof(qc_alignment_t), cudaMemcpyDeviceToHost) ); // free device memory CUDA_SAFE_CALL( cudaFreeHost(d_core_data_p) ); CUDA_SAFE_CALL( cudaFreeHost(d_cigar_data_p) ); CUDA_SAFE_CALL( cudaFreeHost(d_strand_counter_p) ); CUDA_SAFE_CALL( cudaFreeHost(d_map_quality_p) ); CUDA_SAFE_CALL( cudaFreeHost(d_qc_alignment_p) ); } else { // accumulation of partial results is only made once num_blocks = 1; strand_counter_p = (int*) calloc(num_blocks, sizeof(int)); map_quality_p = (int*) calloc(num_blocks, sizeof(int)); alignment_length_p = (int*) calloc(num_blocks, sizeof(int)); qc_alignment_p = (qc_alignment_t*) calloc(bam_data_batch_p->num_alignments, sizeof(qc_alignment_t)); if (time_flag) { start_timer(t1_gpu); } cpu_bam_qc_basic_stats(bam_data_batch_p->core_data_p, strand_counter_p, map_quality_p, alignment_length_p, num_alignments, cpu_num_threads); cpu_bam_qc_map_errors(bam_data_batch_p->core_data_p, bam_data_batch_p->cigar_data_p, qc_alignment_p, num_alignments); if (time_flag) { stop_timer(t1_gpu, t2_gpu, gpu_time); } } //if (time_flag) { // stop_timer(t1_gpu, t2_gpu, gpu_time); //} // create a new qc_batch object bam_qc_batch_p = (bam_qc_batch_t*) malloc(sizeof(bam_qc_batch_t)); bam_qc_batch_p->id = bam_data_batch_list_item_p->id; bam_qc_batch_p->num_alignments = bam_data_batch_p->num_alignments; bam_qc_batch_p->num_blocks = num_blocks; bam_qc_batch_p->qc_alignment_p = 
qc_alignment_p; bam_qc_batch_p->strand_counter_p = strand_counter_p; bam_qc_batch_p->map_quality_p = map_quality_p; bam_qc_batch_p->alignment_length_p = alignment_length_p; //bam_qc_batch_p->alignments_p = bam_data_batch_list_item_p->alignments_p; // and insert it into the bam_qc_batch_list list_item_t* item_p = list_item_new(bam_qc_batch_p->id, 0, bam_qc_batch_p); list_insert_item(item_p, &bam_qc_batch_list); // copy the the current batch item to the cpu batch list in order to perform CPU qc operations list_insert_item(bam_data_batch_list_item_p, cpu_batch_list_p); sprintf(log_message, "Thread-GPU:...processing for batch %i done !\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); //if (time_flag) { stop_timer(t1_gpu, t2_gpu, gpu_time); } } // end of external while loop pthread_mutex_lock(&gpus_thread_alive_lock); gpus_thread_alive--; pthread_mutex_unlock(&gpus_thread_alive_lock); list_decr_writers(cpu_batch_list_p); list_decr_writers(&bam_qc_batch_list); if (time_flag) { stop_timer(t1_qc_calc_server, t2_qc_calc_server, qc_calc_server_time); } LOG_DEBUG("Thread-GPU: END\n"); // exiting... pthread_exit(0); } /* ****************************************************** * QC calc server thread * * *****************************************************/ void* cpus_server(void* params_p) { double coverage_time = 0.0; struct timeval t1_coverage, t2_coverage; LOG_DEBUG("Thread-CPU: START\n"); if (time_flag) { start_timer(t1_cpus_server); } //initialize str_coverage_matrix str_coverage_matrix_init(); cpus_server_input_t* input_p = (cpus_server_input_t*) params_p; qc_mapping_counter_t* qc_mapping_counter_p = (qc_mapping_counter_t*) input_p->qc_mapping_counter; int max_distance_size = input_p->max_distance_size; int cpu_num_threads = input_p->cpu_num_threads; bam_data_batch_t* bam_data_batch_p = NULL; list_item_t* bam_data_batch_list_item_p = NULL; list_t* cpu_batch_list_p = input_p->cpu_batch_list_p; char* gff_filename = input_p->gff_filename; char* output_directory = input_p->output_directory; char* input_filename = input_p->input_filename; // delete previous coverage file to append new data bam_coverage_counter_delete_file(output_directory, input_filename); // creating trie //cp_trie* test_trie = cp_trie_create(0); sqlite3* db; sqlite3_stmt* insert_complete_mapping_stmt; qc_hash_t* qc_hash_p; if (input_p->disk_flag) { //disk implementation: sqlite // create tables mapping_db_create_complete_mappings_database(&db, (const char*) input_p->output_directory, 0); // create and prepare queries insert_complete_mapping_stmt = mapping_db_prepare_insert_complete(db); // start transaction mapping_db_begin_transaction(db); } else { //memory implementation: custom hash table // variables for store intermediate and output results in both CPU and GPU qc_hash_p = (qc_hash_t*) qc_hash_new(QC_HASH_LENGTH); } // variables for coverage (regions data) bam_chromosome_coverage_t bam_chromosome_coverage[num_of_chromosomes]; for (int j = 0; j < num_of_chromosomes; j++) { bam_chromosome_coverage_init(&bam_chromosome_coverage[j]); } gff_data_t* gff_data_p = gff_data_new(gff_filename); int gpus_alive; gpus_alive = list_get_writers(cpu_batch_list_p); while ((bam_data_batch_list_item_p = list_remove_item(cpu_batch_list_p)) != NULL) { if ((time_flag) && (cpus_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_cpus, cpus_standby_time); } char log_message[50]; sprintf(log_message, "Thread-CPU: processing for batch %i....\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); // allocation memory for 
output results bam_data_batch_p = (bam_data_batch_t*) bam_data_batch_list_item_p->data_p; int num_alignments = bam_data_batch_p->num_alignments; int cpu_num_threads = input_p->cpu_num_threads; if (time_flag) { start_timer(t1_cpu); } char* id_seq; int tid, mtid, isize, start_coordinate, seq_length; short int paired_end; bam_data_core_t* core_data_p; if (input_p->disk_flag) { // duplicate code for performance, disk implementation for (int i = 0; i < bam_data_batch_p->num_alignments; i++) { id_seq = &(bam_data_batch_p->id_seq_data_p[bam_data_batch_p->core_data_p[i].id_seq_index]); core_data_p = &(bam_data_batch_p->core_data_p[i]); tid = core_data_p->chromosome; mtid = core_data_p->mate_chromosome; isize = core_data_p->isize; start_coordinate = core_data_p->start_coordinate; seq_length = core_data_p->alignment_length; paired_end = core_data_p->paired_end; // insert in SQLite mapping_db_insert_complete(db, id_seq, paired_end, tid, mtid, isize, start_coordinate, seq_length, insert_complete_mapping_stmt); } } else { // memory implementation for (int i = 0; i < bam_data_batch_p->num_alignments; i++) { id_seq = &(bam_data_batch_p->id_seq_data_p[bam_data_batch_p->core_data_p[i].id_seq_index]); core_data_p = &(bam_data_batch_p->core_data_p[i]); tid = core_data_p->chromosome; start_coordinate = core_data_p->start_coordinate; seq_length = core_data_p->alignment_length; paired_end = core_data_p->paired_end; // insert in qc hash qc_hash_insert_alignment(qc_hash_p, id_seq, tid, start_coordinate, seq_length, paired_end); // insert in trie structure //cp_trie_add(test_trie, id_seq, NULL); } } if (gff_data_batch_in_region(bam_data_batch_p, gff_data_p) != 0) { bam_coverage_compute(bam_data_batch_p, bam_chromosome_coverage, gff_data_p, output_directory, input_filename, cpu_num_threads); } if (time_flag) { stop_timer(t1_cpu, t2_cpu, cpu_time); } sprintf(log_message, "Thread-CPU:...processing for batch %i done !\n", bam_data_batch_list_item_p->id); LOG_DEBUG(log_message); // free the current batch item if all processing with the batch is performed bam_data_batch_free((bam_data_batch_t*) bam_data_batch_list_item_p->data_p); list_item_free(bam_data_batch_list_item_p); // ask again for reads server status gpus_alive = list_get_writers(cpu_batch_list_p); } // end of external while loop // print the last counters bam_coverage_counter_mark_to_print(bam_chromosome_coverage, true); bam_coverage_counter_print(bam_chromosome_coverage, output_directory, input_filename); //qc_hash_list_print(qc_hash_p->qc_hash_list_p); //calculate over the qc hash table to obtain: // - Mean distance between paired ends // - Histogram of mappings per reads unsigned long mean_paired_end_distance = 0; if (!input_p->disk_flag) { if (time_flag) { start_timer(t1_cpu); } qc_hash_perform_calculations(qc_hash_p, qc_mapping_counter_p, &mean_paired_end_distance, max_distance_size, cpu_num_threads); qc_hash_free(qc_hash_p, true); if (time_flag) { stop_timer(t1_cpu, t2_cpu, cpu_time); } } if (input_p->disk_flag) { if (time_flag) { start_timer(t1_db); } mapping_db_end_transaction(db); //sqlite3_finalize(insert_complete_mapping_stmt); //mapping_db_create_indexes(&db); mapping_db_perform_calculations(db, max_distance_size, qc_mapping_counter_p->num_mappings_histogram, &mean_paired_end_distance); mapping_db_close(db); if (time_flag) { stop_timer(t1_db, t2_db, db_time); } } qc_mapping_counter_p->mean_paired_end_distance = mean_paired_end_distance; //free qc hash structure, gff data and chromosome coverage for (int j = 0; j < num_of_chromosomes; j++) { 
bam_chromosome_coverage_clear(&bam_chromosome_coverage[j]); } //qc_hash_free(qc_hash_p, true); gff_data_free(gff_data_p); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_thread_alive--; pthread_mutex_unlock(&cpus_thread_alive_lock); if (time_flag) { stop_timer(t1_cpus_server, t2_cpus_server, cpus_server_time); } // --------------- D E B U G ---------------- printf("--------------- D E B U G ----------------\n"); for (int i = 0; i <= (MAX_MAPPING_COUNT_IN_HISTOGRAM + 1); i++) { printf("qc_mapping_counter_p->num_mappings_histogram[%i]: %i\n", i, qc_mapping_counter_p->num_mappings_histogram[i]); } printf("mean_paired_end_distance: %ld\n\n", qc_mapping_counter_p->mean_paired_end_distance); printf("--------------- D E B U G ----------------\n"); // --------------- D E B U G ---------------- LOG_DEBUG("Thread-CPU: END\n"); // exiting... pthread_exit(0); } /* ****************************************************** * Results server thread * * *****************************************************/ void* results_server(void* params_p) { LOG_DEBUG("Thread-RESULTS: START\n"); if (time_flag) { start_timer(t1_results_server); } results_server_input_t* input_p = (results_server_input_t*) params_p; // variables for storing qc report information bam_qc_report_t bam_qc_report; memset(&bam_qc_report, 0, sizeof(bam_qc_report_t)); qc_mapping_counter_t* qc_mapping_counter_p = (qc_mapping_counter_t*) input_p->qc_mapping_counter; int nb_total_threads = input_p->gpu_num_blocks * input_p->gpu_num_threads; int base_quality = input_p->base_quality; int i, alignments; // go through the results_batch list, and process it // take and remove the first item, and so on... list_item_t* item_p = NULL; bam_qc_batch_t* bam_qc_batch_p = NULL; int gpus_alive, cpus_alive; // getting gpus thread status pthread_mutex_lock(&gpus_thread_alive_lock); gpus_alive = gpus_thread_alive; pthread_mutex_unlock(&gpus_thread_alive_lock); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); //iteration until not NULL is returned, then process batch while ((item_p = list_remove_item(&bam_qc_batch_list)) != NULL) { bam_qc_batch_p = (bam_qc_batch_t*) item_p->data_p; list_item_free(item_p); //printf("while... gpus_alive: %i, cpus_alive: %i, bam_qc_batch_p is NULL: %i\n", gpus_alive, cpus_alive, (bam_qc_batch_p == NULL) ? 1:0); if ((time_flag) && (results_standby_time == 0.0)) { stop_timer(t1_active_reader, t1_active_results, results_standby_time); } if (time_flag) { start_timer(t1_result); } char log_message[50]; sprintf(log_message, "Thread-RESULTS: processing for bam batch %i....\n", bam_qc_batch_p->id); LOG_DEBUG(log_message); // result processing batch per batch alignments = bam_qc_batch_p->num_alignments; bam_qc_report.num_alignments += alignments; for (int k = 0; k < bam_qc_batch_p->num_blocks; k++) { bam_qc_report.strand_counter += bam_qc_batch_p->strand_counter_p[k]; bam_qc_report.mean_map_quality += bam_qc_batch_p->map_quality_p[k]; bam_qc_report.mean_alignment_length += bam_qc_batch_p->alignment_length_p[k]; } for (int k = 0; k < bam_qc_batch_p->num_alignments; k++) { bam_qc_report.map_error_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[MISMATCHES] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[MISMATCHES] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_deletion_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[D] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? 
bam_qc_batch_p->qc_alignment_p[k].counters[D] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_insertion_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[I] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[I] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; bam_qc_report.map_matching_histogram[(bam_qc_batch_p->qc_alignment_p[k].counters[EQUAL] <= MAX_MAP_ERRORS_IN_HISTOGRAM) ? bam_qc_batch_p->qc_alignment_p[k].counters[EQUAL] : (MAX_MAP_ERRORS_IN_HISTOGRAM + 1)]++; } sprintf(log_message, "Thread-RESULTS: ....processing for batch %i done !\n", bam_qc_batch_p->id); LOG_DEBUG(log_message); // free ALL memory bam_qc_batch_free(bam_qc_batch_p, false); if (time_flag) { stop_timer(t1_result, t2_result, result_time); } // getting gpus and cpus thread status pthread_mutex_lock(&gpus_thread_alive_lock); gpus_alive = gpus_thread_alive; pthread_mutex_unlock(&gpus_thread_alive_lock); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); } // end of batch loop printf("bam_qc_report.num_alignments: %lu, strand (+): %lu, strand (-): %lu\n", bam_qc_report.num_alignments, bam_qc_report.strand_counter, (bam_qc_report.num_alignments - bam_qc_report.strand_counter)); if (time_flag) { start_timer(t1_result); } //calculate mean quality and mean length per alignment if (bam_qc_report.num_alignments > 0) { printf("bam_qc_report.mean_read_quality: %lu, num_alignments: %lu, mean_quality: %lu\n", bam_qc_report.mean_map_quality, bam_qc_report.num_alignments, (bam_qc_report.mean_map_quality / bam_qc_report.num_alignments)); printf("bam_qc_report.mean_alignment_length: %lu, num_alignments: %lu, mean_alignment_length: %lu\n", bam_qc_report.mean_alignment_length, bam_qc_report.num_alignments, (bam_qc_report.mean_alignment_length / bam_qc_report.num_alignments)); bam_qc_report.mean_map_quality /= bam_qc_report.num_alignments; bam_qc_report.mean_alignment_length /= bam_qc_report.num_alignments; } else { printf("bam_qc_report.mean_read_quality: %lu, num_alignments: %lu, mean_quality: 0\n", bam_qc_report.mean_map_quality, bam_qc_report.num_alignments); printf("bam_qc_report.mean_alignment_length: %lu, num_alignments: %lu, mean_alignment_length: 0\n", bam_qc_report.mean_alignment_length, bam_qc_report.num_alignments); bam_qc_report.mean_map_quality = 0; bam_qc_report.mean_alignment_length = 0; } if (time_flag) { stop_timer(t1_result, t2_result, result_time); } // and finally, print qc report, data files and graphs // when cpu data is ready (cpus_alive = 0) while (cpus_alive > 0) { sched_yield(); usleep(10000); pthread_mutex_lock(&cpus_thread_alive_lock); cpus_alive = cpus_thread_alive; pthread_mutex_unlock(&cpus_thread_alive_lock); } if (time_flag) { start_timer(t1_result); } bam_qc_report.num_mappings_histogram = qc_mapping_counter_p->num_mappings_histogram; bam_qc_report.mean_paired_end_distance = qc_mapping_counter_p->mean_paired_end_distance; if (time_flag) { stop_timer(t1_result, t2_result, result_time); } if (time_flag) { start_timer(t1_reporting); } generate_report(bam_qc_report, input_p->filename, input_p->base_quality, input_p->report_directory, 1); if (time_flag) { stop_timer(t1_reporting, t2_reporting, reporting_time); } if (time_flag) { stop_timer(t1_results_server, t2_results_server, results_server_time); } LOG_DEBUG("Thread-RESULTS: END\n"); // exiting... 
pthread_exit(0); } /* ************************************************************** * Public functions implementations * * *************************************************************/ void qc_bam_file(size_t batch_size, int batch_list_size, int gpu_num_threads, int gpu_num_blocks, int cpu_num_threads, int base_quality, int max_distance_size, char* input_filename, char* output_directory, char* gff_filename, int disk_flag) { // number of GPUs is obtained, and initializes the number of GPU threads 'alive' int num_gpu_devices; cudaError_t cudaResultCode = cudaGetDeviceCount(&num_gpu_devices); if (cudaResultCode != cudaSuccess) { num_gpu_devices = 0; } gpus_thread_alive = num_gpu_devices; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); if (!prop.canMapHostMemory) { LOG_FATAL("device does not support MapHostMemory\n"); } //initializing bam_data_batch_list_gpu, bam_data_batch_list_cpu and bam_qc_batch_list list_t bam_data_batch_list_gpu; list_t bam_data_batch_list_cpu; if (num_gpu_devices > 0) { list_init("bam_data_batch_list_cpu", num_gpu_devices, batch_list_size, &bam_data_batch_list_cpu); } else { list_init("bam_data_batch_list_cpu", 1, batch_list_size, &bam_data_batch_list_cpu); } list_init("bam_data_batch_list_gpu", 1, batch_list_size, &bam_data_batch_list_gpu); list_init("bam_qc_batch_list", ((num_gpu_devices > 0) ? num_gpu_devices : 1), batch_list_size, &bam_qc_batch_list); //initializing qc_mapping_counter qc_mapping_counter_t qc_mapping_counter; qc_mapping_counter_init(&qc_mapping_counter); //multi-threads bam_reader_t* bam_reader_p = bam_reader_by_batch_new(input_filename, batch_size, base_quality, &bam_data_batch_list_gpu, LIST_INSERT_MODE); bam_reader_start(bam_reader_p); //some local variables void* r; // multi-threads pthread_t* qc_calc_server_thread_p = (pthread_t*) malloc(((num_gpu_devices == 0) ? 
1 : num_gpu_devices) * sizeof(pthread_t)); pthread_t* cpus_server_thread_p = (pthread_t*) malloc(cpu_num_threads * sizeof(pthread_t)); pthread_t results_server_thread; //calling GPU threads to process the bam data, //but first, prepare input parameter int i; qc_calc_server_input_t** qc_calc_server_input_p = (qc_calc_server_input_t**) calloc(num_gpu_devices, sizeof(qc_calc_server_input_t*)); if (num_gpu_devices > 0) { for (i = 0; i < num_gpu_devices; i++) { qc_calc_server_input_p[i] = (qc_calc_server_input_t*) calloc(1, sizeof(qc_calc_server_input_t)); } } else { qc_calc_server_input_p[0] = (qc_calc_server_input_t*) calloc(1, sizeof(qc_calc_server_input_t)); } if (num_gpu_devices > 0) { //GPU implementacion for (i = 0; i < num_gpu_devices; i++) { qc_calc_server_input_p[i]->num_gpu_devices = num_gpu_devices; qc_calc_server_input_p[i]->cpu_num_threads = 0; qc_calc_server_input_p[i]->gpu_device_id[0] = i; qc_calc_server_input_p[i]->gpu_num_blocks = gpu_num_blocks; qc_calc_server_input_p[i]->gpu_num_threads = gpu_num_threads; qc_calc_server_input_p[i]->gpu_batch_list_p = &bam_data_batch_list_gpu; qc_calc_server_input_p[i]->cpu_batch_list_p = &bam_data_batch_list_cpu; pthread_create(&qc_calc_server_thread_p[i], NULL, qc_calc_server, (void*) qc_calc_server_input_p[i]); } } else { //CPU implementacion qc_calc_server_input_p[0]->num_gpu_devices = 0; qc_calc_server_input_p[0]->cpu_num_threads = cpu_num_threads; qc_calc_server_input_p[0]->gpu_device_id[0] = 0; qc_calc_server_input_p[0]->gpu_num_blocks = 0; qc_calc_server_input_p[0]->gpu_num_threads = 0; qc_calc_server_input_p[0]->gpu_batch_list_p = &bam_data_batch_list_gpu; qc_calc_server_input_p[0]->cpu_batch_list_p = &bam_data_batch_list_cpu; pthread_create(&qc_calc_server_thread_p[0], NULL, qc_calc_server, (void*) &qc_calc_server_input_p[0]); } // calling CPU threads to process the bam data, cpus_server_input_t* cpus_server_input_p = (cpus_server_input_t*) calloc(1, sizeof(cpus_server_input_t)); for (i = 0; i < 1; i++) { cpus_server_input_p->cpu_num_threads = cpu_num_threads; cpus_server_input_p->max_distance_size = max_distance_size; cpus_server_input_p->cpu_batch_list_p = &bam_data_batch_list_cpu; cpus_server_input_p->qc_mapping_counter = &qc_mapping_counter; cpus_server_input_p->gff_filename = gff_filename; cpus_server_input_p->output_directory = output_directory; cpus_server_input_p->input_filename = input_filename; cpus_server_input_p->disk_flag = disk_flag; pthread_create(&cpus_server_thread_p[i], NULL, cpus_server, (void*) cpus_server_input_p); } //calling thread to process results from GPU, results_server_input_t results_server_input; results_server_input.gpu_num_blocks = gpu_num_blocks; results_server_input.gpu_num_threads = gpu_num_threads; results_server_input.base_quality = base_quality; results_server_input.qc_mapping_counter = &qc_mapping_counter; results_server_input.filename = input_filename; results_server_input.report_directory = output_directory; pthread_create(&results_server_thread, NULL, results_server, (void*) &results_server_input); num_alignments = bam_reader_join(bam_reader_p); for (int i = 0; i < num_gpu_devices; i++) { pthread_join(qc_calc_server_thread_p[i], &r); } free(qc_calc_server_thread_p); for (int i = 0; i < 1; i++) { pthread_join(cpus_server_thread_p[i], &r); } free(cpus_server_thread_p); pthread_join(results_server_thread, &r); //free thread stuff and parameters if (num_gpu_devices > 0) { for (i = 0; i < num_gpu_devices; i++) { free(qc_calc_server_input_p[i]); } } else { free(qc_calc_server_input_p[0]); } 
free(qc_calc_server_input_p); free(cpus_server_input_p); bam_reader_free(bam_reader_p); } #endif /* QC_CU */
best_hyps.hip
// !!! This is a file automatically generated by hipify!!!
#include "best_hyps.h"

namespace amunmt {
namespace GPU {

BestHyps::BestHyps(const God &god)
  : BestHypsBase(god),
    keys_(god.Get<size_t>("beam-size") * god.Get<size_t>("mini-batch")),
    costs_(god.Get<size_t>("beam-size") * god.Get<size_t>("mini-batch")),
    maxBeamSize_(god.Get<uint>("beam-size"))
{
  if (!god_.UseFusedSoftmax()) {
    NthElement *obj = new NthElement(god.Get<size_t>("beam-size"), god.Get<size_t>("mini-batch"));
    nthElement_.reset(obj);
  }
}

void BestHyps::DisAllowUNK(mblas::Matrix& Prob) {
  SetColumn(Prob, UNK_ID, std::numeric_limits<float>::lowest());
}

void BestHyps::FindBests(const std::vector<uint>& beamSizes, mblas::Matrix& Probs,
                         std::vector<float>& outCosts,
                         std::vector<unsigned>& outKeys,
                         const bool isFirst) {
  nthElement_->getNBestList(beamSizes, Probs, outCosts, outKeys, isFirst);
}

// fast fused softmax and nth_element
void BestHyps::FindBests(const std::vector<uint>& beamSizes, mblas::Matrix& Probs,
                         mblas::Vector<NthOutBatch> &nBest,
                         std::vector<float>& outCosts,
                         std::vector<unsigned>& outKeys,
                         const bool isFirst) {
  getNBestList(beamSizes, Probs, nBest, outCosts, outKeys, isFirst);
}

std::vector<SoftAlignmentPtr> BestHyps::GetAlignments(const std::vector<ScorerPtr>& scorers,
                                                      size_t hypIndex) {
  std::vector<SoftAlignmentPtr> alignments;
  for (auto& scorer : scorers) {
    if (GPU::EncoderDecoder* encdec = dynamic_cast<GPU::EncoderDecoder*>(scorer.get())) {
      const mblas::Matrix &attention = encdec->GetAttention();
      size_t attLength = attention.dim(1);

      SoftAlignment *softAlignment = new SoftAlignment(attLength);
      mblas::copy(
          attention.data() + hypIndex * attLength,
          attLength,
          softAlignment->data(),
          hipMemcpyDeviceToHost
      );

      alignments.emplace_back(softAlignment);
    } else {
      amunmt_UTIL_THROW2("Return Alignment is allowed only with Nematus scorer.");
    }
  }
  return alignments;
}

// standard nth_element
void BestHyps::CalcBeam(
    const Beam& prevHyps,
    const std::vector<ScorerPtr>& scorers,
    const Words& filterIndices,
    std::vector<Beam>& beams,
    std::vector<uint>& beamSizes)
{
  BEGIN_TIMER("CalcBeam");

  using namespace mblas;

  mblas::Matrix& Probs = static_cast<mblas::Matrix&>(scorers[0]->GetProbs());

  std::vector<float> vCosts;
  for (auto& h : prevHyps) {
    vCosts.push_back(h->GetCost());
  }
  mblas::copy(vCosts.data(), vCosts.size(), costs_.data(), hipMemcpyHostToDevice);
  //mblas::copy(vCosts.begin(), vCosts.end(), costs_.begin());

  size_t beamSizeSum = std::accumulate(beamSizes.begin(), beamSizes.end(), 0);

  std::vector<float> bestCosts;
  std::vector<unsigned> bestKeys;

  const bool isFirst = (vCosts[0] == 0.0f) ? true : false;

  if (god_.UseFusedSoftmax()) {
    const mblas::Matrix& b4 = *static_cast<const mblas::Matrix*>(scorers[0]->GetBias());
    mblas::Vector<NthOutBatch> &nBest = *static_cast<mblas::Vector<NthOutBatch>*>(scorers[0]->GetNBest());
    nBest.newSize(beamSizeSum);

    BEGIN_TIMER("GetProbs.LogSoftmaxAndNBest");
    mblas::LogSoftmaxAndNBest(nBest, Probs, b4, costs_, forbidUNK_, maxBeamSize_, beamSizes, beamSizeSum, isFirst);
    PAUSE_TIMER("GetProbs.LogSoftmaxAndNBest");
    //std::cerr << "2Probs=" << Probs.Debug(1) << std::endl;

    FindBests(beamSizes, Probs, nBest, bestCosts, bestKeys, isFirst);
  } else {
    BroadcastVecColumn(weights_.at(scorers[0]->GetName()) * _1 + _2, Probs, costs_);

    for (size_t i = 1; i < scorers.size(); ++i) {
      mblas::Matrix &currProbs = static_cast<mblas::Matrix&>(scorers[i]->GetProbs());
      Element(_1 + weights_.at(scorers[i]->GetName()) * _2, Probs, currProbs);
    }

    if (forbidUNK_) {
      DisAllowUNK(Probs);
    }

    FindBests(beamSizes, Probs, bestCosts, bestKeys, isFirst);
  }

  std::vector<std::vector<float>> breakDowns;
  if (god_.ReturnNBestList()) {
    breakDowns.push_back(bestCosts);
    for (size_t i = 1; i < scorers.size(); ++i) {
      std::vector<float> modelCosts(beamSizeSum);
      mblas::Matrix &currProbs = static_cast<mblas::Matrix&>(scorers[i]->GetProbs());

      nthElement_->getValueByKey(modelCosts, currProbs);
      breakDowns.push_back(modelCosts);
    }
  }

  std::map<size_t, size_t> batchMap;
  size_t tmp = 0;
  for (size_t batchID = 0; batchID < beamSizes.size(); ++batchID) {
    for (size_t t = 0; t < beamSizes[batchID]; ++t) {
      batchMap[tmp++] = batchID;
    }
  }

  for (size_t i = 0; i < beamSizeSum; i++) {
    size_t wordIndex = bestKeys[i] % Probs.dim(1);
    if (isInputFiltered_) {
      wordIndex = filterIndices[wordIndex];
    }

    size_t hypIndex = bestKeys[i] / Probs.dim(1);
    float cost = bestCosts[i];

    HypothesisPtr hyp;
    if (returnAttentionWeights_) {
      hyp.reset(new Hypothesis(prevHyps[hypIndex], wordIndex, hypIndex, cost,
                               GetAlignments(scorers, hypIndex)));
    } else {
      hyp.reset(new Hypothesis(prevHyps[hypIndex], wordIndex, hypIndex, cost));
    }

    if(god_.ReturnNBestList()) {
      hyp->GetCostBreakdown().resize(scorers.size());
      float sum = 0;
      for (size_t j = 0; j < scorers.size(); ++j) {
        if (j == 0)
          hyp->GetCostBreakdown()[0] = breakDowns[0][i];
        else {
          float cost = 0;
          if (j < scorers.size()) {
            if (prevHyps[hypIndex]->GetCostBreakdown().size() < scorers.size())
              const_cast<HypothesisPtr&>(prevHyps[hypIndex])->GetCostBreakdown().resize(scorers.size(), 0.0f);
            cost = breakDowns[j][i] + const_cast<HypothesisPtr&>(prevHyps[hypIndex])->GetCostBreakdown()[j];
          }
          sum += weights_.at(scorers[j]->GetName()) * cost;
          hyp->GetCostBreakdown()[j] = cost;
        }
      }
      hyp->GetCostBreakdown()[0] -= sum;
      hyp->GetCostBreakdown()[0] /= weights_.at(scorers[0]->GetName());
    }

    beams[batchMap[i]].push_back(hyp);
  }

  PAUSE_TIMER("CalcBeam");
}

//////////////////////////////////////////////////////////////////////////
void BestHyps::getNBestList(const std::vector<uint>& beamSizes,
                            mblas::Matrix& Probs,
                            mblas::Vector<NthOutBatch> &nBest,
                            std::vector<float>& outCosts,
                            std::vector<uint>& outKeys,
                            const bool isFirst) const
{
  GetPairs(nBest, outKeys, outCosts);
  assert(outCosts.size() == outKeys.size());

  /*
  cerr << "outCosts/outKeys=";
  for (size_t i = 0; i < outKeys.size(); ++i) {
    cerr << "(" << outCosts[i] << "," << outKeys[i] << ") ";
  }
  cerr << endl;
  */
  //cerr << endl;
}

void BestHyps::GetPairs(mblas::Vector<NthOutBatch> &nBest,
                        std::vector<uint>& outKeys,
                        std::vector<float>& outValues) const
{
  //cerr << "top=" << top2.size() << " nBest=" << nBest.size() << endl;
  outKeys.resize(nBest.size());
  outValues.resize(nBest.size());

  std::vector<NthOutBatch> hostVec(nBest.size());
  mblas::copy(nBest.data(), nBest.size(), hostVec.data(), hipMemcpyDeviceToHost);

  for (size_t i = 0; i < nBest.size(); ++i) {
    outKeys[i] = hostVec[i].ind;
    outValues[i] = hostVec[i].score;
  }
}

} // namespace
}
best_hyps.cu
#include "best_hyps.h" namespace amunmt { namespace GPU { BestHyps::BestHyps(const God &god) : BestHypsBase(god), keys_(god.Get<size_t>("beam-size") * god.Get<size_t>("mini-batch")), costs_(god.Get<size_t>("beam-size") * god.Get<size_t>("mini-batch")), maxBeamSize_(god.Get<uint>("beam-size")) { if (!god_.UseFusedSoftmax()) { NthElement *obj = new NthElement(god.Get<size_t>("beam-size"), god.Get<size_t>("mini-batch")); nthElement_.reset(obj); } } void BestHyps::DisAllowUNK(mblas::Matrix& Prob) { SetColumn(Prob, UNK_ID, std::numeric_limits<float>::lowest()); } void BestHyps::FindBests(const std::vector<uint>& beamSizes, mblas::Matrix& Probs, std::vector<float>& outCosts, std::vector<unsigned>& outKeys, const bool isFirst) { nthElement_->getNBestList(beamSizes, Probs, outCosts, outKeys, isFirst); } // fast fused softmax and nth_element void BestHyps::FindBests(const std::vector<uint>& beamSizes, mblas::Matrix& Probs, mblas::Vector<NthOutBatch> &nBest, std::vector<float>& outCosts, std::vector<unsigned>& outKeys, const bool isFirst) { getNBestList(beamSizes, Probs, nBest, outCosts, outKeys, isFirst); } std::vector<SoftAlignmentPtr> BestHyps::GetAlignments(const std::vector<ScorerPtr>& scorers, size_t hypIndex) { std::vector<SoftAlignmentPtr> alignments; for (auto& scorer : scorers) { if (GPU::EncoderDecoder* encdec = dynamic_cast<GPU::EncoderDecoder*>(scorer.get())) { const mblas::Matrix &attention = encdec->GetAttention(); size_t attLength = attention.dim(1); SoftAlignment *softAlignment = new SoftAlignment(attLength); mblas::copy( attention.data() + hypIndex * attLength, attLength, softAlignment->data(), cudaMemcpyDeviceToHost ); alignments.emplace_back(softAlignment); } else { amunmt_UTIL_THROW2("Return Alignment is allowed only with Nematus scorer."); } } return alignments; } // standard nth_element void BestHyps::CalcBeam( const Beam& prevHyps, const std::vector<ScorerPtr>& scorers, const Words& filterIndices, std::vector<Beam>& beams, std::vector<uint>& beamSizes) { BEGIN_TIMER("CalcBeam"); using namespace mblas; mblas::Matrix& Probs = static_cast<mblas::Matrix&>(scorers[0]->GetProbs()); std::vector<float> vCosts; for (auto& h : prevHyps) { vCosts.push_back(h->GetCost()); } mblas::copy(vCosts.data(), vCosts.size(), costs_.data(), cudaMemcpyHostToDevice); //mblas::copy(vCosts.begin(), vCosts.end(), costs_.begin()); size_t beamSizeSum = std::accumulate(beamSizes.begin(), beamSizes.end(), 0); std::vector<float> bestCosts; std::vector<unsigned> bestKeys; const bool isFirst = (vCosts[0] == 0.0f) ? 
true : false; if (god_.UseFusedSoftmax()) { const mblas::Matrix& b4 = *static_cast<const mblas::Matrix*>(scorers[0]->GetBias()); mblas::Vector<NthOutBatch> &nBest = *static_cast<mblas::Vector<NthOutBatch>*>(scorers[0]->GetNBest()); nBest.newSize(beamSizeSum); BEGIN_TIMER("GetProbs.LogSoftmaxAndNBest"); mblas::LogSoftmaxAndNBest(nBest, Probs, b4, costs_, forbidUNK_, maxBeamSize_, beamSizes, beamSizeSum, isFirst); PAUSE_TIMER("GetProbs.LogSoftmaxAndNBest"); //std::cerr << "2Probs=" << Probs.Debug(1) << std::endl; FindBests(beamSizes, Probs, nBest, bestCosts, bestKeys, isFirst); } else { BroadcastVecColumn(weights_.at(scorers[0]->GetName()) * _1 + _2, Probs, costs_); for (size_t i = 1; i < scorers.size(); ++i) { mblas::Matrix &currProbs = static_cast<mblas::Matrix&>(scorers[i]->GetProbs()); Element(_1 + weights_.at(scorers[i]->GetName()) * _2, Probs, currProbs); } if (forbidUNK_) { DisAllowUNK(Probs); } FindBests(beamSizes, Probs, bestCosts, bestKeys, isFirst); } std::vector<std::vector<float>> breakDowns; if (god_.ReturnNBestList()) { breakDowns.push_back(bestCosts); for (size_t i = 1; i < scorers.size(); ++i) { std::vector<float> modelCosts(beamSizeSum); mblas::Matrix &currProbs = static_cast<mblas::Matrix&>(scorers[i]->GetProbs()); nthElement_->getValueByKey(modelCosts, currProbs); breakDowns.push_back(modelCosts); } } std::map<size_t, size_t> batchMap; size_t tmp = 0; for (size_t batchID = 0; batchID < beamSizes.size(); ++batchID) { for (size_t t = 0; t < beamSizes[batchID]; ++t) { batchMap[tmp++] = batchID; } } for (size_t i = 0; i < beamSizeSum; i++) { size_t wordIndex = bestKeys[i] % Probs.dim(1); if (isInputFiltered_) { wordIndex = filterIndices[wordIndex]; } size_t hypIndex = bestKeys[i] / Probs.dim(1); float cost = bestCosts[i]; HypothesisPtr hyp; if (returnAttentionWeights_) { hyp.reset(new Hypothesis(prevHyps[hypIndex], wordIndex, hypIndex, cost, GetAlignments(scorers, hypIndex))); } else { hyp.reset(new Hypothesis(prevHyps[hypIndex], wordIndex, hypIndex, cost)); } if(god_.ReturnNBestList()) { hyp->GetCostBreakdown().resize(scorers.size()); float sum = 0; for (size_t j = 0; j < scorers.size(); ++j) { if (j == 0) hyp->GetCostBreakdown()[0] = breakDowns[0][i]; else { float cost = 0; if (j < scorers.size()) { if (prevHyps[hypIndex]->GetCostBreakdown().size() < scorers.size()) const_cast<HypothesisPtr&>(prevHyps[hypIndex])->GetCostBreakdown().resize(scorers.size(), 0.0f); cost = breakDowns[j][i] + const_cast<HypothesisPtr&>(prevHyps[hypIndex])->GetCostBreakdown()[j]; } sum += weights_.at(scorers[j]->GetName()) * cost; hyp->GetCostBreakdown()[j] = cost; } } hyp->GetCostBreakdown()[0] -= sum; hyp->GetCostBreakdown()[0] /= weights_.at(scorers[0]->GetName()); } beams[batchMap[i]].push_back(hyp); } PAUSE_TIMER("CalcBeam"); } ////////////////////////////////////////////////////////////////////////// void BestHyps::getNBestList(const std::vector<uint>& beamSizes, mblas::Matrix& Probs, mblas::Vector<NthOutBatch> &nBest, std::vector<float>& outCosts, std::vector<uint>& outKeys, const bool isFirst) const { GetPairs(nBest, outKeys, outCosts); assert(outCosts.size() == outKeys.size()); /* cerr << "outCosts/outKeys="; for (size_t i = 0; i < outKeys.size(); ++i) { cerr << "(" << outCosts[i] << "," << outKeys[i] << ") "; } cerr << endl; */ //cerr << endl; } void BestHyps::GetPairs(mblas::Vector<NthOutBatch> &nBest, std::vector<uint>& outKeys, std::vector<float>& outValues) const { //cerr << "top=" << top2.size() << " nBest=" << nBest.size() << endl; outKeys.resize(nBest.size()); 
outValues.resize(nBest.size()); std::vector<NthOutBatch> hostVec(nBest.size()); mblas::copy(nBest.data(), nBest.size(), hostVec.data(), cudaMemcpyDeviceToHost); for (size_t i = 0; i < nBest.size(); ++i) { outKeys[i] = hostVec[i].ind; outValues[i] = hostVec[i].score; } } } // namespace }
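The CalcBeam loop above unpacks each flat n-best key with `% Probs.dim(1)` and `/ Probs.dim(1)`, i.e. a key encodes hypIndex * vocabSize + wordIndex. A minimal host-only sketch of that decoding, with an assumed vocabulary size and illustrative names rather than amunmt types:

// Sketch only: how a flat n-best key splits into (hypothesis, word) indices.
// vocabSize and the sample key are made-up values for illustration.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t vocabSize = 50000;            // assumed vocabulary size
  const std::size_t key = 3 * vocabSize + 1234;   // hypothesis 3, word 1234

  const std::size_t hypIndex  = key / vocabSize;  // mirrors bestKeys[i] / Probs.dim(1)
  const std::size_t wordIndex = key % vocabSize;  // mirrors bestKeys[i] % Probs.dim(1)

  std::printf("hypIndex=%zu wordIndex=%zu\n", hypIndex, wordIndex);
  return 0;
}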
30406b24e01f821b688b6c242fe38027bdee28ab.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <sys/time.h>
#include <rocblas.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/swap.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>

#define CUDA_CHECK_RETURN(value) {\
    hipError_t _m_cudaStat = value;\
    if (_m_cudaStat != hipSuccess) {\
        fprintf(stderr, "Error %s at line %d in file %s\n",\
                hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
}

using namespace std;

//a[1 2 3 4]
//b[5 6 7 8]
//swap
//a[5 6 7 8]
//b[1 2 3 4]

__global__ void swap(float *a, float *b, int vector_size){
    int indx = blockIdx.x * blockDim.x + threadIdx.x;
    float k = a[indx];
    a[indx] = b[indx];
    b[indx] = k;
}

float thrust_swap(int vector_size){
    thrust::host_vector<float> hA(vector_size);
    thrust::host_vector<float> hB(vector_size);

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    thrust::device_vector<float> dA = hA;
    thrust::device_vector<float> dB = hB;

    cout << "THRUST_SWAP" << endl;
    /*
    cout << "vector A before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    float ThrustTime;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    thrust::swap(dA, dB);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&ThrustTime, start, stop);

    thrust::copy(dA.begin(), dA.end(), hA.begin());
    thrust::copy(dB.begin(), dB.end(), hB.begin());
    /*
    cout << "vector A after swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    hipEventDestroy(start);
    hipEventDestroy(stop);

    return ThrustTime;
}

float blas_swap(int vector_size){
    float *hA = new float[vector_size];
    float *hB = new float[vector_size];

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    float *dA, *dB;
    CUDA_CHECK_RETURN(hipMalloc(&dA, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(hipMalloc(&dB, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(hipMemcpy(dA, hA, sizeof(float) * vector_size, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dB, hB, sizeof(float) * vector_size, hipMemcpyHostToDevice));

    cout << "CUBLAS_SWAP" << endl;
    /*
    cout << "vector A before swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    hipblasHandle_t handle;
    hipblasCreate(&handle);

    float CublasTime;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    hipblasSswap(handle, vector_size, dA, 1, dB, 1);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    CUDA_CHECK_RETURN(hipDeviceSynchronize());
    CUDA_CHECK_RETURN(hipGetLastError());
    hipEventElapsedTime(&CublasTime, start, stop);

    CUDA_CHECK_RETURN(hipMemcpy(hA, dA, sizeof(float) * vector_size, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(hB, dB, sizeof(float) * vector_size, hipMemcpyDeviceToHost));
    /*
    cout << "vector A after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipblasDestroy(handle);

    delete [] hA;
    delete [] hB;
    hipFree(dA);
    hipFree(dB);

    return CublasTime;
}

int main(int argc, char *argv[]) {
    cout << "1 arg - vector_size, 2 arg - block_size" << endl << endl;
    int vector_size = atoi(argv[1]);
    int block_size = atoi(argv[2]);

    srand(time(NULL));

    float *hA = new float[vector_size];
    float *hB = new float[vector_size];

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    float *dA, *dB;
    CUDA_CHECK_RETURN(hipMalloc(&dA, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(hipMalloc(&dB, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(hipMemcpy(dA, hA, sizeof(float) * vector_size, hipMemcpyHostToDevice));
    CUDA_CHECK_RETURN(hipMemcpy(dB, hB, sizeof(float) * vector_size, hipMemcpyHostToDevice));

    cout << "CUDA_SWAP" << endl;
    /*
    cout << "vector A before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    int num_blocks = (int)ceil((float)vector_size / block_size);

    float elapsedTime;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    hipLaunchKernelGGL(( swap) , dim3(num_blocks), dim3(block_size), 0, 0, dA, dB, vector_size);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    CUDA_CHECK_RETURN(hipDeviceSynchronize());
    CUDA_CHECK_RETURN(hipGetLastError());
    hipEventElapsedTime(&elapsedTime, start, stop);

    CUDA_CHECK_RETURN(hipMemcpy(hA, dA, sizeof(float) * vector_size, hipMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(hipMemcpy(hB, dB, sizeof(float) * vector_size, hipMemcpyDeviceToHost));

    //cout << "CUDA_COPY" << endl;
    /*
    cout << "vector A after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    cout << "Cuda_Time = " << elapsedTime << endl;
    cout << endl;

    float cublas = blas_swap(vector_size);
    cout << "Cublas_Time = " << cublas << endl;
    cout << endl;

    float thrust = thrust_swap(vector_size);
    cout << "Thrust_Time = " << thrust << endl;
    cout << endl;

    hipEventDestroy(start);
    hipEventDestroy(stop);

    delete [] hA;
    delete [] hB;
    hipFree(dA);
    hipFree(dB);
}
30406b24e01f821b688b6c242fe38027bdee28ab.cu
#include <iostream>
#include <cuda.h>
#include <cstdio>
#include <sys/time.h>
#include <cublas_v2.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/swap.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>

#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error %s at line %d in file %s\n",\
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
}

using namespace std;

//a[1 2 3 4]
//b[5 6 7 8]
//swap
//a[5 6 7 8]
//b[1 2 3 4]

__global__ void swap(float *a, float *b, int vector_size){
    int indx = blockIdx.x * blockDim.x + threadIdx.x;
    float k = a[indx];
    a[indx] = b[indx];
    b[indx] = k;
}

float thrust_swap(int vector_size){
    thrust::host_vector<float> hA(vector_size);
    thrust::host_vector<float> hB(vector_size);

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    thrust::device_vector<float> dA = hA;
    thrust::device_vector<float> dB = hB;

    cout << "THRUST_SWAP" << endl;
    /*
    cout << "vector A before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    float ThrustTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    thrust::swap(dA, dB);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ThrustTime, start, stop);

    thrust::copy(dA.begin(), dA.end(), hA.begin());
    thrust::copy(dB.begin(), dB.end(), hB.begin());
    /*
    cout << "vector A after swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return ThrustTime;
}

float blas_swap(int vector_size){
    float *hA = new float[vector_size];
    float *hB = new float[vector_size];

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    float *dA, *dB;
    CUDA_CHECK_RETURN(cudaMalloc(&dA, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(cudaMalloc(&dB, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(cudaMemcpy(dA, hA, sizeof(float) * vector_size, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dB, hB, sizeof(float) * vector_size, cudaMemcpyHostToDevice));

    cout << "CUBLAS_SWAP" << endl;
    /*
    cout << "vector A before swap: ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    cublasHandle_t handle;
    cublasCreate(&handle);

    float CublasTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    cublasSswap(handle, vector_size, dA, 1, dB, 1);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&CublasTime, start, stop);

    CUDA_CHECK_RETURN(cudaMemcpy(hA, dA, sizeof(float) * vector_size, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(hB, dB, sizeof(float) * vector_size, cudaMemcpyDeviceToHost));
    /*
    cout << "vector A after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cublasDestroy(handle);

    delete [] hA;
    delete [] hB;
    cudaFree(dA);
    cudaFree(dB);

    return CublasTime;
}

int main(int argc, char *argv[]) {
    cout << "1 arg - vector_size, 2 arg - block_size" << endl << endl;
    int vector_size = atoi(argv[1]);
    int block_size = atoi(argv[2]);

    srand(time(NULL));

    float *hA = new float[vector_size];
    float *hB = new float[vector_size];

    for(int i = 0; i < vector_size; i++) {
        hA[i] = rand()%5;
        hB[i] = rand()%5;
    }

    float *dA, *dB;
    CUDA_CHECK_RETURN(cudaMalloc(&dA, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(cudaMalloc(&dB, sizeof(float) * vector_size));
    CUDA_CHECK_RETURN(cudaMemcpy(dA, hA, sizeof(float) * vector_size, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dB, hB, sizeof(float) * vector_size, cudaMemcpyHostToDevice));

    cout << "CUDA_SWAP" << endl;
    /*
    cout << "vector A before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B before swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    int num_blocks = (int)ceil((float)vector_size / block_size);

    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    swap <<<num_blocks, block_size>>> (dA, dB, vector_size);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);

    CUDA_CHECK_RETURN(cudaMemcpy(hA, dA, sizeof(float) * vector_size, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaMemcpy(hB, dB, sizeof(float) * vector_size, cudaMemcpyDeviceToHost));

    //cout << "CUDA_COPY" << endl;
    /*
    cout << "vector A after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hA[i] << " ";
    }
    cout << endl;
    cout << "vector B after swap : ";
    for (int i = 0; i < vector_size; i++){
        cout << hB[i] << " ";
    }
    cout << endl;
    */

    cout << "Cuda_Time = " << elapsedTime << endl;
    cout << endl;

    float cublas = blas_swap(vector_size);
    cout << "Cublas_Time = " << cublas << endl;
    cout << endl;

    float thrust = thrust_swap(vector_size);
    cout << "Thrust_Time = " << thrust << endl;
    cout << endl;

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    delete [] hA;
    delete [] hB;
    cudaFree(dA);
    cudaFree(dB);
}
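Both launch configurations above round the grid up with ceil(vector_size / block_size), but the swap kernel indexes a and b without a bounds check, so the trailing threads of the last block touch memory past the end of the vectors whenever vector_size is not a multiple of block_size. A guarded variant, shown only as a sketch and not as a change to the benchmark sources:

// Sketch only: bounds-checked swap kernel; a possible drop-in for the
// unguarded version above when vector_size is not a multiple of block_size.
__global__ void swap_guarded(float *a, float *b, int vector_size) {
    int indx = blockIdx.x * blockDim.x + threadIdx.x;
    if (indx < vector_size) {
        float k = a[indx];
        a[indx] = b[indx];
        b[indx] = k;
    }
}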
eb76a2e2d994ac35d268403b4ce8b7a6c4acb8b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "HandleUnsuccessfulCudaCall.h"

#include <hip/hip_runtime.h>
#include <iostream> // std::cerr
#include <string>

using std::cerr;

namespace Utilities
{
namespace ErrorHandling
{

HandleUnsuccessfulCUDACall::HandleUnsuccessfulCUDACall(
  const std::string& error_message
):
  error_message_{error_message},
  cuda_error_{hipSuccess}
{}

void HandleUnsuccessfulCUDACall::operator()(const hipError_t cuda_error)
{
  cuda_error_ = cuda_error;

  if (!is_cuda_success())
  {
    cerr << error_message_ << " (error code " <<
      hipGetErrorString(cuda_error_) << ")!\n";
  }
}

} // namespace ErrorHandling
} // namespace Utilities
eb76a2e2d994ac35d268403b4ce8b7a6c4acb8b2.cu
#include "HandleUnsuccessfulCudaCall.h" #include <cuda_runtime.h> #include <iostream> // std::cerr #include <string> using std::cerr; namespace Utilities { namespace ErrorHandling { HandleUnsuccessfulCUDACall::HandleUnsuccessfulCUDACall( const std::string& error_message ): error_message_{error_message}, cuda_error_{cudaSuccess} {} void HandleUnsuccessfulCUDACall::operator()(const cudaError_t cuda_error) { cuda_error_ = cuda_error; if (!is_cuda_success()) { cerr << error_message_ << " (error code " << cudaGetErrorString(cuda_error_) << ")!\n"; } } } // namespace ErrorHandling } // namespace Utilities
ab8ed6d21aa92f2e5dfebc4859a88cd9585ace9a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //Quantile (percentile) functionality #include <thrust/device_vector.h> #include <thrust/copy.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.hpp" #include "rmm/thrust_rmm_allocator.h" #include "quantiles.h" namespace{ //unknown template<typename VType, typename RetT = double> void f_quantile_tester(rmm::device_vector<VType>& d_in) { using FctrType = std::function<RetT(VType, VType, double)>; FctrType lin_interp{[](VType y0, VType y1, double x){ return static_cast<RetT>(static_cast<double>(y0) + x*static_cast<double>(y1-y0));//(f(x) - y0) / (x - 0) = m = (y1 - y0)/(1 - 0) }}; FctrType midpoint{[](VType y0, VType y1, double x){ return static_cast<RetT>(static_cast<double>(y0 + y1)/2.0); }}; FctrType nearest{[](VType y0, VType y1, double x){ return static_cast<RetT>(x < 0.5 ? y0 : y1); }}; FctrType lowest{[](VType y0, VType y1, double x){ return static_cast<RetT>(y0); }}; FctrType highest{[](VType y0, VType y1, double x){ return static_cast<RetT>(y1); }}; std::vector<std::string> methods{"lin_interp", "midpoint", "nearest", "lowest", "highest"}; size_t n_methods = methods.size(); std::vector<FctrType> vf{lin_interp, midpoint, nearest, lowest, highest}; std::vector<double> qvals{0.0, 0.25, 0.33, 0.5, 1.0}; assert( n_methods == methods.size() ); for(auto q: qvals) { VType res = quantile_approx(d_in.data().get(), d_in.size(), q); std::cout<<"q: "<<q<<"; exact res: "<<res<<"\n"; for(auto i = 0;i<n_methods;++i) { RetT rt = quantile_exact(d_in.data().get(), d_in.size(), q, vf[i]); std::cout<<"q: "<<q<<"; method: "<<methods[i]<<"; rt: "<<rt<<"\n"; } } } template<typename ColType, typename RetT = double> // just in case double won't be enough to hold result, in the future gdf_error trampoline_exact(gdf_column* col_in, gdf_quantile_method prec, double q, void* t_erased_res, gdf_context* ctxt) { RetT* ptr_res = static_cast<RetT*>(t_erased_res); size_t n = col_in->size; ColType* p_dv = static_cast<ColType*>(col_in->data); if( ctxt->flag_sort_inplace || ctxt->flag_sorted) { return select_quantile(p_dv, n, q, prec, *ptr_res, ctxt->flag_sorted); } else { rmm::device_vector<ColType> dv(n); thrust::copy_n(thrust::device, /*TODO: stream*/p_dv, n, dv.begin()); hipDeviceSynchronize(); p_dv = dv.data().get(); return select_quantile(p_dv, n, q, prec, *ptr_res, ctxt->flag_sorted); } } template<typename ColType> void trampoline_approx(gdf_column* col_in, double q, void* t_erased_res, gdf_context* ctxt) { ColType* ptr_res = static_cast<ColType*>(t_erased_res); size_t n = col_in->size; ColType* p_dv = static_cast<ColType*>(col_in->data); if( ctxt->flag_sort_inplace || ctxt->flag_sorted ) { *ptr_res = quantile_approx(p_dv, n, q, NULL, ctxt->flag_sorted); } else { rmm::device_vector<ColType> dv(n); thrust::copy_n(thrust::device, /*TODO: stream*/p_dv, n, dv.begin()); hipDeviceSynchronize(); p_dv = dv.data().get(); *ptr_res = 
quantile_approx(p_dv, n, q, NULL, ctxt->flag_sorted); } } }//unknown namespace gdf_error gdf_quantile_exact( gdf_column* col_in, //input column; gdf_quantile_method prec, //precision: type of quantile method calculation double q, //requested quantile in [0,1] void* t_erased_res, //result; for <exact> should probably be double*; it's void* because //(1) for uniformity of interface with <approx>; //(2) for possible types bigger than double, in the future; gdf_context* ctxt) //context info { GDF_REQUIRE(!col_in->valid || !col_in->null_count, GDF_VALIDITY_UNSUPPORTED); gdf_error ret = GDF_SUCCESS; assert( col_in->size > 0 ); switch( col_in->dtype ) { case GDF_INT8: { using ColType = int8_t;//char; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT16: { using ColType = int16_t;//short; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT32: { using ColType = int32_t;//int; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT64: { using ColType = int64_t;//long; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_FLOAT32: { using ColType = float; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_FLOAT64: { using ColType = double; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } default: assert( false );//type not handled, yet } return ret; } gdf_error gdf_quantile_approx( gdf_column* col_in, //input column; double q, //requested quantile in [0,1] void* t_erased_res, //type-erased result of same type as column; gdf_context* ctxt) //context info { GDF_REQUIRE(!col_in->valid || !col_in->null_count, GDF_VALIDITY_UNSUPPORTED); gdf_error ret = GDF_SUCCESS; assert( col_in->size > 0 ); switch( col_in->dtype ) { case GDF_INT8: { using ColType = int8_t;//char; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT16: { using ColType = int16_t;//short; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT32: { using ColType = int32_t;//int; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT64: { using ColType = int64_t;//long; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_FLOAT32: { using ColType = float; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_FLOAT64: { using ColType = double; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } default: assert( false );//type not handled, yet } return ret; }
ab8ed6d21aa92f2e5dfebc4859a88cd9585ace9a.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //Quantile (percentile) functionality #include <thrust/device_vector.h> #include <thrust/copy.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.hpp" #include "rmm/thrust_rmm_allocator.h" #include "quantiles.h" namespace{ //unknown template<typename VType, typename RetT = double> void f_quantile_tester(rmm::device_vector<VType>& d_in) { using FctrType = std::function<RetT(VType, VType, double)>; FctrType lin_interp{[](VType y0, VType y1, double x){ return static_cast<RetT>(static_cast<double>(y0) + x*static_cast<double>(y1-y0));//(f(x) - y0) / (x - 0) = m = (y1 - y0)/(1 - 0) }}; FctrType midpoint{[](VType y0, VType y1, double x){ return static_cast<RetT>(static_cast<double>(y0 + y1)/2.0); }}; FctrType nearest{[](VType y0, VType y1, double x){ return static_cast<RetT>(x < 0.5 ? y0 : y1); }}; FctrType lowest{[](VType y0, VType y1, double x){ return static_cast<RetT>(y0); }}; FctrType highest{[](VType y0, VType y1, double x){ return static_cast<RetT>(y1); }}; std::vector<std::string> methods{"lin_interp", "midpoint", "nearest", "lowest", "highest"}; size_t n_methods = methods.size(); std::vector<FctrType> vf{lin_interp, midpoint, nearest, lowest, highest}; std::vector<double> qvals{0.0, 0.25, 0.33, 0.5, 1.0}; assert( n_methods == methods.size() ); for(auto q: qvals) { VType res = quantile_approx(d_in.data().get(), d_in.size(), q); std::cout<<"q: "<<q<<"; exact res: "<<res<<"\n"; for(auto i = 0;i<n_methods;++i) { RetT rt = quantile_exact(d_in.data().get(), d_in.size(), q, vf[i]); std::cout<<"q: "<<q<<"; method: "<<methods[i]<<"; rt: "<<rt<<"\n"; } } } template<typename ColType, typename RetT = double> // just in case double won't be enough to hold result, in the future gdf_error trampoline_exact(gdf_column* col_in, gdf_quantile_method prec, double q, void* t_erased_res, gdf_context* ctxt) { RetT* ptr_res = static_cast<RetT*>(t_erased_res); size_t n = col_in->size; ColType* p_dv = static_cast<ColType*>(col_in->data); if( ctxt->flag_sort_inplace || ctxt->flag_sorted) { return select_quantile(p_dv, n, q, prec, *ptr_res, ctxt->flag_sorted); } else { rmm::device_vector<ColType> dv(n); thrust::copy_n(thrust::device, /*TODO: stream*/p_dv, n, dv.begin()); cudaDeviceSynchronize(); p_dv = dv.data().get(); return select_quantile(p_dv, n, q, prec, *ptr_res, ctxt->flag_sorted); } } template<typename ColType> void trampoline_approx(gdf_column* col_in, double q, void* t_erased_res, gdf_context* ctxt) { ColType* ptr_res = static_cast<ColType*>(t_erased_res); size_t n = col_in->size; ColType* p_dv = static_cast<ColType*>(col_in->data); if( ctxt->flag_sort_inplace || ctxt->flag_sorted ) { *ptr_res = quantile_approx(p_dv, n, q, NULL, ctxt->flag_sorted); } else { rmm::device_vector<ColType> dv(n); thrust::copy_n(thrust::device, /*TODO: stream*/p_dv, n, dv.begin()); cudaDeviceSynchronize(); p_dv = dv.data().get(); *ptr_res = quantile_approx(p_dv, n, q, NULL, ctxt->flag_sorted); } } }//unknown 
namespace gdf_error gdf_quantile_exact( gdf_column* col_in, //input column; gdf_quantile_method prec, //precision: type of quantile method calculation double q, //requested quantile in [0,1] void* t_erased_res, //result; for <exact> should probably be double*; it's void* because //(1) for uniformity of interface with <approx>; //(2) for possible types bigger than double, in the future; gdf_context* ctxt) //context info { GDF_REQUIRE(!col_in->valid || !col_in->null_count, GDF_VALIDITY_UNSUPPORTED); gdf_error ret = GDF_SUCCESS; assert( col_in->size > 0 ); switch( col_in->dtype ) { case GDF_INT8: { using ColType = int8_t;//char; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT16: { using ColType = int16_t;//short; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT32: { using ColType = int32_t;//int; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_INT64: { using ColType = int64_t;//long; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_FLOAT32: { using ColType = float; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } case GDF_FLOAT64: { using ColType = double; ret = trampoline_exact<ColType>(col_in, prec, q, t_erased_res, ctxt); break; } default: assert( false );//type not handled, yet } return ret; } gdf_error gdf_quantile_approx( gdf_column* col_in, //input column; double q, //requested quantile in [0,1] void* t_erased_res, //type-erased result of same type as column; gdf_context* ctxt) //context info { GDF_REQUIRE(!col_in->valid || !col_in->null_count, GDF_VALIDITY_UNSUPPORTED); gdf_error ret = GDF_SUCCESS; assert( col_in->size > 0 ); switch( col_in->dtype ) { case GDF_INT8: { using ColType = int8_t;//char; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT16: { using ColType = int16_t;//short; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT32: { using ColType = int32_t;//int; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_INT64: { using ColType = int64_t;//long; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_FLOAT32: { using ColType = float; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } case GDF_FLOAT64: { using ColType = double; trampoline_approx<ColType>(col_in, q, t_erased_res, ctxt); break; } default: assert( false );//type not handled, yet } return ret; }
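The functors at the top of the quantile files encode the interpolation rules; lin_interp, for example, evaluates y0 + x*(y1 - y0) between the two values that bracket the requested quantile. A standalone host sketch of that rule follows. It is not cudf code: the sorting step and the fractional-index convention q*(n-1) are assumptions made only for this example.

// Sketch only: linear-interpolation quantile on a small host vector.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

double quantile_lin_interp(std::vector<double> v, double q) {
  std::sort(v.begin(), v.end());
  const double pos = q * (v.size() - 1);               // fractional index, assumed convention
  const std::size_t lo = static_cast<std::size_t>(pos);
  const std::size_t hi = std::min(lo + 1, v.size() - 1);
  const double x = pos - lo;
  return v[lo] + x * (v[hi] - v[lo]);                   // y0 + x*(y1 - y0), as in lin_interp
}

int main() {
  std::vector<double> v{7.0, 1.0, 3.0, 5.0};
  std::printf("q=0.5 -> %f\n", quantile_lin_interp(v, 0.5));  // midpoint of 3 and 5: 4.0
  return 0;
}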
798b2fa903c9ce7cf5b60fc36ed1e04f03c136e3.hip
// !!! This is a file automatically generated by hipify!!! #include "compute.h" #include "helper_cuda.h" #include <iostream> #include <cuda_gl_interop.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> //============================================================================== void Compute::Init(dataInfo_t* dataInfo) { hipGLSetGLDevice(0); DataInfo = dataInfo; NumCellFaces[0] = (DataInfo->resolution[0]+1)* (DataInfo->resolution[1])* (DataInfo->resolution[2]); NumCellFaces[1] = (DataInfo->resolution[0])* (DataInfo->resolution[1]+1)* (DataInfo->resolution[2]); NumCellFaces[2] = (DataInfo->resolution[0])* (DataInfo->resolution[1])* (DataInfo->resolution[2]+1); InitData(); InitTextures(); InitSymbols(); Res[0][0] = DataInfo->resolution[0]+1; Res[0][1] = DataInfo->resolution[1]; Res[0][2] = DataInfo->resolution[2]; Res[1][0] = DataInfo->resolution[0]; Res[1][1] = DataInfo->resolution[1]+1; Res[1][2] = DataInfo->resolution[2]; Res[2][0] = DataInfo->resolution[0]; Res[2][1] = DataInfo->resolution[1]; Res[2][2] = DataInfo->resolution[2]+1; Res[3][0] = DataInfo->resolution[0]; Res[3][1] = DataInfo->resolution[1]; Res[3][2] = DataInfo->resolution[2]; VolumeSize.x = DataInfo->resolution[0]; VolumeSize.y = DataInfo->resolution[1]; VolumeSize.z = DataInfo->resolution[2]; } //============================================================================== void Compute::InitData() { int numCells = (DataInfo->resolution[0])* (DataInfo->resolution[1])* (DataInfo->resolution[2]); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); int res[3] = {DataInfo->resolution[0], DataInfo->resolution[1], DataInfo->resolution[2]}; hipExtent volumeSize; // Dye myCudaCall(hipMalloc((void**)&Dye, numCells*sizeof(float)), __LINE__, __FILE__); volumeSize = make_hipExtent(res[0], res[1], res[2]); myCudaCall(hipMalloc3DArray(&ca_Dye, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity X component myCudaCall(hipMalloc((void**)&VelocityX, NumCellFaces[0]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_hipExtent(res[0]+1, res[1], res[2]); myCudaCall(hipMalloc3DArray(&ca_VelocityX, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity Y component myCudaCall(hipMalloc((void**)&VelocityY, NumCellFaces[1]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_hipExtent(res[0], res[1]+1, res[2]); myCudaCall(hipMalloc3DArray(&ca_VelocityY, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity Z component myCudaCall(hipMalloc((void**)&VelocityZ, NumCellFaces[2]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_hipExtent(res[0], res[1], res[2]+1); myCudaCall(hipMalloc3DArray(&ca_VelocityZ, &channelDesc, volumeSize), __LINE__, __FILE__); // Divergence myCudaCall(hipMalloc((void**)&NegDivergence, numCells*sizeof(float)), __LINE__, __FILE__); // Pressure myCudaCall(hipMalloc((void**)&Pressure, numCells*sizeof(float)), __LINE__, __FILE__); // Boundary conditions myCudaCall(hipMallocArray(&ca_BCLeft, &channelDesc, res[1], res[2]), __LINE__, __FILE__); myCudaCall(hipMallocArray(&ca_BCRight, &channelDesc, res[1], res[2]), __LINE__, __FILE__); myCudaCall(hipMallocArray(&ca_BCBottom, &channelDesc, res[0], res[2]), __LINE__, __FILE__); myCudaCall(hipMallocArray(&ca_BCTop, &channelDesc, res[0], res[2]), __LINE__, __FILE__); myCudaCall(hipMallocArray(&ca_BCBack, &channelDesc, res[0], res[1]), __LINE__, __FILE__); myCudaCall(hipMallocArray(&ca_BCFront, &channelDesc, res[0], res[1]), 
__LINE__, __FILE__); } //============================================================================== void Compute::InitDye() { InitDye_kernel(); hipGraphicsMapResources(1, &VolumeResource); hipGraphicsSubResourceGetMappedArray(&ca_Dye, VolumeResource, 0, 0); UpdateCudaArray(ca_Dye, DataInfo->resolution, Dye); hipGraphicsUnmapResources(1, &VolumeResource); } //============================================================================== void Compute::InitVelocity() { InitVelocity_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } //============================================================================== void Compute::RegisterVolumeTexture(GLuint volume) { hipGraphicsGLRegisterImage(&VolumeResource, volume, GL_TEXTURE_3D, hipGraphicsRegisterFlagsNone); } //============================================================================== void Compute::UnregisterVolumeTexture() { hipGraphicsUnregisterResource(VolumeResource); } //============================================================================== void Compute::AdvectDye() { AdvectDye_kernel(); hipGraphicsMapResources(1, &VolumeResource); hipGraphicsResourceSetMapFlags(VolumeResource, hipGraphicsMapFlagsWriteDiscard); hipGraphicsSubResourceGetMappedArray(&ca_Dye, VolumeResource, 0, 0); UpdateCudaArray(ca_Dye, DataInfo->resolution, Dye); hipGraphicsUnmapResources(1, &VolumeResource); } //============================================================================== void Compute::AdvectVelocity() { AdvectVelocity_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } //============================================================================== void Compute::SetBoundaryConditions() { SetBoundaryConditions_kernel(); float *velobc; int count; count = Res[3][1]*Res[3][2]; velobc = new float[count]; for (int k = 0; k < Res[3][2]; k++) { for (int j = 0; j < Res[3][1]; j++) { int idx = j + k*Res[3][1]; if (j > Res[3][1]/4 && j < Res[3][1]*3/4 && k > Res[3][2]/4 && k < Res[3][2]*3/4 ) { velobc[idx] = 1.0f; } else { velobc[idx] = 0.0f; } } } myCudaCall(hipMemcpyToArray(ca_BCLeft, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); for (int k = 0; k < Res[3][2]; k++) { for (int j = 0; j < Res[3][1]; j++) { int idx = j + k*Res[3][1]; velobc[idx] = 0.0f; } } myCudaCall(hipMemcpyToArray(ca_BCRight, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; count = Res[3][0]*Res[3][2]; velobc = new float[count]; for (int k = 0; k < Res[3][2]; k++) { for (int i = 0; i < Res[3][0]; i++) { int idx = i + k*Res[3][0]; velobc[idx] = 0.0f; } } myCudaCall(hipMemcpyToArray(ca_BCBottom, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); myCudaCall(hipMemcpyToArray(ca_BCTop, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; count = Res[3][0]*Res[3][1]; velobc = new float[count]; for (int j = 0; j < Res[3][1]; j++) { for (int i = 0; i < Res[3][0]; i++) { int idx = i + j*Res[3][0]; velobc[idx] = 0.0f; } } myCudaCall(hipMemcpyToArray(ca_BCBottom, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); myCudaCall(hipMemcpyToArray(ca_BCTop, 0, 0, velobc, count, hipMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; } //============================================================================== void Compute::Update() { 
SetTimestep(ComputeTimestep()); // SetBoundaryConditions(); ComputeNegDivergence(); // //Projection(); // PressureUpdate(); AdvectDye(); AdvectVelocity(); } //============================================================================== void Compute::UpdateCudaArray(hipArray* ca, int res[3], float* src) { hipMemcpy3DParms copyParams[1] = {0}; hipExtent volumeSize = make_hipExtent(res[0], res[1], res[2]); copyParams[0].srcPtr = make_hipPitchedPtr(src, res[0]*sizeof(float), res[0], res[1]); copyParams[0].dstArray = ca; copyParams[0].extent = volumeSize; copyParams[0].kind = hipMemcpyDeviceToDevice; hipMemcpy3D(copyParams); } //============================================================================== void Compute::CopyCudaArray(hipArray* ca, int res[3], float* dst) { hipMemcpy3DParms copyParams[1] = {0}; hipExtent volumeSize = make_hipExtent(res[0], res[1], res[2]); copyParams[0].dstPtr = make_hipPitchedPtr(dst, res[0]*sizeof(float), res[0], res[1]); copyParams[0].srcArray = ca; copyParams[0].extent = volumeSize; copyParams[0].kind = hipMemcpyDeviceToDevice; hipMemcpy3D(copyParams); } //============================================================================== void Compute::ComputeNegDivergence() { ComputeNegDivergence_kernel(); } //============================================================================== float Compute::ComputeTimestep() { thrust::device_vector<float> tdv_velo; // enough space to hold each velocity component tdv_velo.resize((DataInfo->resolution[0]+1)* (DataInfo->resolution[1]+1)* (DataInfo->resolution[2]+1)); float *velo_raw_ptr = thrust::raw_pointer_cast(tdv_velo.data()); thrust::device_vector<float>::iterator iter; // TODO: do I have to copy the array? // TODO: write my own reduce algorithm CopyCudaArray(ca_VelocityX, Res[0], velo_raw_ptr); iter = thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_u = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_u = fabs(*iter); max_u = min_u > max_u ? min_u : max_u; CopyCudaArray(ca_VelocityY, Res[1], velo_raw_ptr); iter =thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_v = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_v = fabs(*iter); max_v = min_v > max_v ? min_v : max_v; CopyCudaArray(ca_VelocityZ, Res[2], velo_raw_ptr); iter = thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_w = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_w = fabs(*iter); max_w = min_w > max_w ? min_w : max_w; float u_max = max(max(max_u,max_v),max_w); float dx = min(min(DataInfo->spacing[0],DataInfo->spacing[1]),DataInfo->spacing[2]); float CFL = 0.25f; float dt; if (u_max > 0.0f) { dt = CFL*dx/u_max; } else { dt = 0.0f; } return dt; } //============================================================================== void Compute::PressureUpdate() { PressureUpdate_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } // TODO: implement boundary conditions in textures // TODO: modify rhs to account solid velocities // TODO: build matrix A
798b2fa903c9ce7cf5b60fc36ed1e04f03c136e3.cu
#include "compute.h" #include "helper_cuda.h" #include <iostream> #include <cuda_gl_interop.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> //============================================================================== void Compute::Init(dataInfo_t* dataInfo) { cudaGLSetGLDevice(0); DataInfo = dataInfo; NumCellFaces[0] = (DataInfo->resolution[0]+1)* (DataInfo->resolution[1])* (DataInfo->resolution[2]); NumCellFaces[1] = (DataInfo->resolution[0])* (DataInfo->resolution[1]+1)* (DataInfo->resolution[2]); NumCellFaces[2] = (DataInfo->resolution[0])* (DataInfo->resolution[1])* (DataInfo->resolution[2]+1); InitData(); InitTextures(); InitSymbols(); Res[0][0] = DataInfo->resolution[0]+1; Res[0][1] = DataInfo->resolution[1]; Res[0][2] = DataInfo->resolution[2]; Res[1][0] = DataInfo->resolution[0]; Res[1][1] = DataInfo->resolution[1]+1; Res[1][2] = DataInfo->resolution[2]; Res[2][0] = DataInfo->resolution[0]; Res[2][1] = DataInfo->resolution[1]; Res[2][2] = DataInfo->resolution[2]+1; Res[3][0] = DataInfo->resolution[0]; Res[3][1] = DataInfo->resolution[1]; Res[3][2] = DataInfo->resolution[2]; VolumeSize.x = DataInfo->resolution[0]; VolumeSize.y = DataInfo->resolution[1]; VolumeSize.z = DataInfo->resolution[2]; } //============================================================================== void Compute::InitData() { int numCells = (DataInfo->resolution[0])* (DataInfo->resolution[1])* (DataInfo->resolution[2]); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); int res[3] = {DataInfo->resolution[0], DataInfo->resolution[1], DataInfo->resolution[2]}; cudaExtent volumeSize; // Dye myCudaCall(cudaMalloc((void**)&Dye, numCells*sizeof(float)), __LINE__, __FILE__); volumeSize = make_cudaExtent(res[0], res[1], res[2]); myCudaCall(cudaMalloc3DArray(&ca_Dye, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity X component myCudaCall(cudaMalloc((void**)&VelocityX, NumCellFaces[0]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_cudaExtent(res[0]+1, res[1], res[2]); myCudaCall(cudaMalloc3DArray(&ca_VelocityX, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity Y component myCudaCall(cudaMalloc((void**)&VelocityY, NumCellFaces[1]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_cudaExtent(res[0], res[1]+1, res[2]); myCudaCall(cudaMalloc3DArray(&ca_VelocityY, &channelDesc, volumeSize), __LINE__, __FILE__); // Velocity Z component myCudaCall(cudaMalloc((void**)&VelocityZ, NumCellFaces[2]*sizeof(float)), __LINE__, __FILE__); volumeSize = make_cudaExtent(res[0], res[1], res[2]+1); myCudaCall(cudaMalloc3DArray(&ca_VelocityZ, &channelDesc, volumeSize), __LINE__, __FILE__); // Divergence myCudaCall(cudaMalloc((void**)&NegDivergence, numCells*sizeof(float)), __LINE__, __FILE__); // Pressure myCudaCall(cudaMalloc((void**)&Pressure, numCells*sizeof(float)), __LINE__, __FILE__); // Boundary conditions myCudaCall(cudaMallocArray(&ca_BCLeft, &channelDesc, res[1], res[2]), __LINE__, __FILE__); myCudaCall(cudaMallocArray(&ca_BCRight, &channelDesc, res[1], res[2]), __LINE__, __FILE__); myCudaCall(cudaMallocArray(&ca_BCBottom, &channelDesc, res[0], res[2]), __LINE__, __FILE__); myCudaCall(cudaMallocArray(&ca_BCTop, &channelDesc, res[0], res[2]), __LINE__, __FILE__); myCudaCall(cudaMallocArray(&ca_BCBack, &channelDesc, res[0], res[1]), __LINE__, __FILE__); myCudaCall(cudaMallocArray(&ca_BCFront, &channelDesc, res[0], res[1]), __LINE__, __FILE__); } 
//============================================================================== void Compute::InitDye() { InitDye_kernel(); cudaGraphicsMapResources(1, &VolumeResource); cudaGraphicsSubResourceGetMappedArray(&ca_Dye, VolumeResource, 0, 0); UpdateCudaArray(ca_Dye, DataInfo->resolution, Dye); cudaGraphicsUnmapResources(1, &VolumeResource); } //============================================================================== void Compute::InitVelocity() { InitVelocity_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } //============================================================================== void Compute::RegisterVolumeTexture(GLuint volume) { cudaGraphicsGLRegisterImage(&VolumeResource, volume, GL_TEXTURE_3D, cudaGraphicsRegisterFlagsNone); } //============================================================================== void Compute::UnregisterVolumeTexture() { cudaGraphicsUnregisterResource(VolumeResource); } //============================================================================== void Compute::AdvectDye() { AdvectDye_kernel(); cudaGraphicsMapResources(1, &VolumeResource); cudaGraphicsResourceSetMapFlags(VolumeResource, cudaGraphicsMapFlagsWriteDiscard); cudaGraphicsSubResourceGetMappedArray(&ca_Dye, VolumeResource, 0, 0); UpdateCudaArray(ca_Dye, DataInfo->resolution, Dye); cudaGraphicsUnmapResources(1, &VolumeResource); } //============================================================================== void Compute::AdvectVelocity() { AdvectVelocity_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } //============================================================================== void Compute::SetBoundaryConditions() { SetBoundaryConditions_kernel(); float *velobc; int count; count = Res[3][1]*Res[3][2]; velobc = new float[count]; for (int k = 0; k < Res[3][2]; k++) { for (int j = 0; j < Res[3][1]; j++) { int idx = j + k*Res[3][1]; if (j > Res[3][1]/4 && j < Res[3][1]*3/4 && k > Res[3][2]/4 && k < Res[3][2]*3/4 ) { velobc[idx] = 1.0f; } else { velobc[idx] = 0.0f; } } } myCudaCall(cudaMemcpyToArray(ca_BCLeft, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); for (int k = 0; k < Res[3][2]; k++) { for (int j = 0; j < Res[3][1]; j++) { int idx = j + k*Res[3][1]; velobc[idx] = 0.0f; } } myCudaCall(cudaMemcpyToArray(ca_BCRight, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; count = Res[3][0]*Res[3][2]; velobc = new float[count]; for (int k = 0; k < Res[3][2]; k++) { for (int i = 0; i < Res[3][0]; i++) { int idx = i + k*Res[3][0]; velobc[idx] = 0.0f; } } myCudaCall(cudaMemcpyToArray(ca_BCBottom, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); myCudaCall(cudaMemcpyToArray(ca_BCTop, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; count = Res[3][0]*Res[3][1]; velobc = new float[count]; for (int j = 0; j < Res[3][1]; j++) { for (int i = 0; i < Res[3][0]; i++) { int idx = i + j*Res[3][0]; velobc[idx] = 0.0f; } } myCudaCall(cudaMemcpyToArray(ca_BCBottom, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); myCudaCall(cudaMemcpyToArray(ca_BCTop, 0, 0, velobc, count, cudaMemcpyHostToDevice), __LINE__, __FILE__); delete [] velobc; } //============================================================================== void Compute::Update() { 
SetTimestep(ComputeTimestep()); // SetBoundaryConditions(); ComputeNegDivergence(); // //Projection(); // PressureUpdate(); AdvectDye(); AdvectVelocity(); } //============================================================================== void Compute::UpdateCudaArray(cudaArray* ca, int res[3], float* src) { cudaMemcpy3DParms copyParams[1] = {0}; cudaExtent volumeSize = make_cudaExtent(res[0], res[1], res[2]); copyParams[0].srcPtr = make_cudaPitchedPtr(src, res[0]*sizeof(float), res[0], res[1]); copyParams[0].dstArray = ca; copyParams[0].extent = volumeSize; copyParams[0].kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(copyParams); } //============================================================================== void Compute::CopyCudaArray(cudaArray* ca, int res[3], float* dst) { cudaMemcpy3DParms copyParams[1] = {0}; cudaExtent volumeSize = make_cudaExtent(res[0], res[1], res[2]); copyParams[0].dstPtr = make_cudaPitchedPtr(dst, res[0]*sizeof(float), res[0], res[1]); copyParams[0].srcArray = ca; copyParams[0].extent = volumeSize; copyParams[0].kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(copyParams); } //============================================================================== void Compute::ComputeNegDivergence() { ComputeNegDivergence_kernel(); } //============================================================================== float Compute::ComputeTimestep() { thrust::device_vector<float> tdv_velo; // enough space to hold each velocity component tdv_velo.resize((DataInfo->resolution[0]+1)* (DataInfo->resolution[1]+1)* (DataInfo->resolution[2]+1)); float *velo_raw_ptr = thrust::raw_pointer_cast(tdv_velo.data()); thrust::device_vector<float>::iterator iter; // TODO: do I have to copy the array? // TODO: write my own reduce algorithm CopyCudaArray(ca_VelocityX, Res[0], velo_raw_ptr); iter = thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_u = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_u = fabs(*iter); max_u = min_u > max_u ? min_u : max_u; CopyCudaArray(ca_VelocityY, Res[1], velo_raw_ptr); iter =thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_v = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_v = fabs(*iter); max_v = min_v > max_v ? min_v : max_v; CopyCudaArray(ca_VelocityZ, Res[2], velo_raw_ptr); iter = thrust::max_element(tdv_velo.begin(), tdv_velo.end()); float max_w = fabs(*iter); iter = thrust::min_element(tdv_velo.begin(), tdv_velo.end()); float min_w = fabs(*iter); max_w = min_w > max_w ? min_w : max_w; float u_max = max(max(max_u,max_v),max_w); float dx = min(min(DataInfo->spacing[0],DataInfo->spacing[1]),DataInfo->spacing[2]); float CFL = 0.25f; float dt; if (u_max > 0.0f) { dt = CFL*dx/u_max; } else { dt = 0.0f; } return dt; } //============================================================================== void Compute::PressureUpdate() { PressureUpdate_kernel(); UpdateCudaArray(ca_VelocityX, Res[0], VelocityX); UpdateCudaArray(ca_VelocityY, Res[1], VelocityY); UpdateCudaArray(ca_VelocityZ, Res[2], VelocityZ); } // TODO: implement boundary conditions in textures // TODO: modify rhs to account solid velocities // TODO: build matrix A
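Compute::ComputeTimestep above reduces each velocity component to its largest absolute value with thrust and then applies a CFL condition, dt = CFL * dx / u_max, with dx the smallest grid spacing. A standalone arithmetic sketch of that final step, using made-up spacing and velocity values:

// Sketch only: the CFL timestep rule used by ComputeTimestep, on host scalars.
#include <algorithm>
#include <cstdio>

int main() {
  const float spacing[3] = {0.1f, 0.2f, 0.1f};  // assumed grid spacing
  const float u_max = 2.5f;                     // assumed max |velocity| over u, v, w
  const float dx = std::min(std::min(spacing[0], spacing[1]), spacing[2]);
  const float CFL = 0.25f;
  const float dt = (u_max > 0.0f) ? CFL * dx / u_max : 0.0f;
  std::printf("dt = %f\n", dt);                 // 0.25 * 0.1 / 2.5 = 0.01
  return 0;
}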
5fe218b49c88f18f8741e89f5eb2dd7cd9dc90ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cstdio> #include<fstream> #include<cmath> #include<cuda.h> int threshold=256; int xthread=32; __global__ void multiply(float* A,float* B,float* C,int jump,int jump1,int jump2,int iter) { __shared__ float A1[32][32],B1[32][32]; int posy=blockIdx.y*blockDim.y+threadIdx.y; int posx=blockIdx.x*blockDim.x+threadIdx.x; int row=posy*jump+threadIdx.x; int col=posx+threadIdx.y*jump1; int place=posy*jump2+posx; for(int i=0;i<iter;i++) { A1[threadIdx.y][threadIdx.x]=A[row]; B1[threadIdx.y][threadIdx.x]=B[col]; __syncthreads(); for(int i=0;i<blockDim.x;i++) C[place]+=A1[threadIdx.y][i]*B1[i][threadIdx.x]; row+=blockDim.x; col+=blockDim.y*jump1; __syncthreads(); } } //non_square matrix multiplication __global__ void kernel1(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,int jump3,float* D,int jump4,int m1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; if(col<m2 && row<m1) C[col+jump3*row]=A[col+jump1*row]+A[n1+col+jump1*row]; if(col<m3 && row<m2) D[col+jump4*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel7(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,int jump3,float* D,int jump4,int m1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; if(col<m2 && row<m1) C[col+jump3*row]=A[col+jump1*row]-A[n1+col+jump1*row]; if(col<m3 && row<m2) D[col+jump4*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel2(float* A,int jump,int n1,int n2,float* B,int jump1,float* C,int jump2,int n3,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; if(col<m3) { if(3*blockIdx.y<gridDim.y) { int point=col+(blockIdx.y * blockDim.y + threadIdx.y)*jump; A[point+n2]+=A[point]; } else if(3*blockIdx.y<2*gridDim.y) { int point=col+((blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y)*jump; A[point]+=A[point+n1]; A[point+n1]=0; } } if(col<m2 && 3*blockIdx.y>=2*gridDim.y) { int row=(blockIdx.y-2*gridDim.y/3) * blockDim.y + threadIdx.y; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n3]; } } __global__ void kernel3(float* A,int jump,int n,float* B,int jump1,float* C,int jump2,int n1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y + threadIdx.y; if(col<m2) A[col+jump*row+n]-=A[col+jump*row]; if(col<m3) B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n1]; } __global__ void kernel4(float* A,int jump,int n1,float* B,int jump1,float* C,int jump2,float* D,int jump3,int n2,int m1,int m2) { int col=blockIdx.x * blockDim.x+threadIdx.x,row; if(2*blockIdx.y<gridDim.y) { row=blockIdx.y * blockDim.y + threadIdx.y; if(row<m1) { A[col+jump*row]+=B[col+jump1*row]; A[col+jump*row+n1]+=B[col+jump1*row]; B[col+jump1*row]=0; } } else { row=(blockIdx.y-gridDim.y/2) * blockDim.y + threadIdx.y; if(row<m2) C[col+jump2*row]=D[col+jump3*row+n2]-D[col+jump3*row]; } } __global__ void kernel5(float* A,int jump,int n1,float* B,int jump1) { int point=(blockIdx.x * blockDim.x+threadIdx.x)+(blockIdx.y * blockDim.y + threadIdx.y)*jump; int point1=(blockIdx.x * blockDim.x+threadIdx.x)+(blockIdx.y * blockDim.y + threadIdx.y)*jump1; A[point]+=B[point1]; A[point+n1]+=B[point1]; } __global__ void kernel6(float* A,int jump,float* B,int jump1,int n1,float* C,int jump2,float* D,int jump3,int n2,int m1,int m2,int m3) { int col=blockIdx.x*blockDim.x+threadIdx.x; if(col<m2 && 3*blockIdx.y<gridDim.y) { int row=blockIdx.y*blockDim.y+threadIdx.y; 
if(row<m1) A[col+row*jump]=0; } if(col<m3 && 3*blockIdx.y>=gridDim.y && 3*blockIdx.y<2*gridDim.y) { int row=(blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y; if(row<m1) B[col+row*jump1]-=B[col+row*jump1+n1]; } if(col<m3 && 3*blockIdx.y>2*gridDim.y) { int row=(blockIdx.y-(2*gridDim.y/3)) * blockDim.y + threadIdx.y; if(row<m2) C[col+jump2*row]=D[col+jump3*row]-D[col+jump3*row+n2]; } } void strassen(float* A,int jump,float* B,int jump1,float* C,int jump2,int n1,int n2,int n3,float* temp1,float* temp2,int n,int n_) { if(n1<=threshold || n2<=threshold || n3<=threshold) { hipLaunchKernelGGL(( multiply) , dim3(dim3(n3/xthread,n1/xthread)),dim3(dim3(xthread,xthread)), 0, 0, A,B,C,jump,jump1,jump2,n2/xthread); } else { n1/=2;n2/=2;n3/=2; n/=2;n_/=2; //M1 hipLaunchKernelGGL(( kernel1) , dim3(dim3(n_/xthread,n/xthread)),dim3(dim3(xthread,xthread)), 0, 0, A,jump,jump*n1+n2,B,jump1,jump1*n2+n3,temp1,n2,temp2,n3,n1,n2,n3); //temp1=A11+A22 //temp2=B11+B22 strassen(temp1,n2,temp2,n3,C,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C11=temp1*temp2 //M6 hipLaunchKernelGGL(( kernel7) , dim3(dim3(n_/xthread,n/xthread)),dim3(dim3(xthread,xthread)), 0, 0, A+jump*n1,jump,-jump*n1,B,jump1,n3,temp1,n2,temp2,n3,n1,n2,n3); //temp1=A21-A11//temp2=B11+B12 strassen(temp1,n2,temp2,n3,C+jump2*n1+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C22=temp1*temp2 //M7 hipLaunchKernelGGL(( kernel7) , dim3(dim3(n_/xthread,n/xthread)),dim3(dim3(xthread,xthread)), 0, 0, A+n2,jump,jump*n1,B+jump1*n2,jump1,n3,temp1,n2,temp2,n3,n1,n2,n3);//temp1=A12-A22 //temp2=B21+B22 strassen(temp1,n2,temp2,n3,C+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C12+=temp1*temp2 hipLaunchKernelGGL(( kernel2) , dim3(dim3(n_/xthread,(3*n1)/xthread)),dim3(dim3(xthread,xthread)), 0, 0, C,jump2,n3,jump2*n1+n3,temp1,n2,A+jump*n1,jump,n2,n2,n3); //C22+=C11//C11+=C12 //temp1=A21+A22 //C12=0 //M2 strassen(temp1,n2,B,jump1,C+jump2*n1,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C21=temp1*B11 hipLaunchKernelGGL(( kernel3) , dim3(dim3(n_/xthread,n1/xthread)),dim3(dim3(xthread,xthread)), 0, 0, C+jump2*n1,jump2,n3,temp1,n2,A,jump,n2,n2,n3); //C22-=C21 //temp1=A11+A12 //M5 strassen(temp1,n2,B+jump1*n2+n3,jump1,C+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C12=temp1*B22 hipLaunchKernelGGL(( kernel6), dim3(dim3(n_/xthread,(3*n)/xthread)),dim3(dim3(xthread,xthread)), 0, 0, temp1,n2,C,jump2,n3,temp2,n3,B+n3,jump1,jump1*n2,n1,n2,n3); //C11-=C12 //temp2=B12-B22 //temp1=0 //M3 strassen(A,jump,temp2,n3,temp1,n3,n1,n2,n3,temp1+n1*n3,temp2+n2*n3,n,n_); //temp1=A11*temp2 hipLaunchKernelGGL(( kernel4) , dim3(dim3(n3/xthread,(2*n)/xthread)),dim3(dim3(xthread,xthread)), 0, 0, C+n3,jump2,jump2*n1,temp1,n2,temp2,n3,B,jump1,jump1*n2,n1,n2); //C12=C12+temp1//C22=C22+temp1 //temp2=B21-B11 //temp1=0 //M4 strassen(A+jump*n1+n2,jump,temp2,n3,temp1,n3,n1,n2,n3,temp1+n1*n3,temp2+n2*n3,n,n_); //temp1=A22*temp2 hipLaunchKernelGGL(( kernel5) , dim3(dim3(n3/xthread,n1/xthread)),dim3(dim3(xthread,xthread)), 0, 0, C,jump2,jump2*n1,temp1,n2); //C21=C21+temp1//C11=C11+temp1 } } //Square matrix multiplication __global__ void kernel1_sq(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,float* D,int jump) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; C[col+jump*row]=A[col+jump1*row]+A[n1+col+jump1*row]; D[col+jump*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel7_sq(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,float* D,int jump) { int col=blockIdx.x * blockDim.x+threadIdx.x; int 
row=blockIdx.y * blockDim.y+threadIdx.y; C[col+jump*row]=A[col+jump1*row]-A[n1+col+jump1*row]; D[col+jump*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel2_sq(float* A,int jump,int n1,int n2,float* B,int jump1,float* C,int jump2,int n3) { int col=blockIdx.x * blockDim.x+threadIdx.x; if(3*blockIdx.y<gridDim.y) { int point=col+(blockIdx.y * blockDim.y + threadIdx.y)*jump; A[point+n2]+=A[point]; } else if(3*blockIdx.y<2*gridDim.y) { int point=col+((blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y)*jump; A[point]+=A[point+n1]; A[point+n1]=0; } else { int row=(blockIdx.y-2*gridDim.y/3) * blockDim.y + threadIdx.y; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n3]; } } __global__ void kernel3_sq(float* A,int jump,int n,float* B,int jump1,float* C,int jump2,int n1) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y + threadIdx.y; A[col+jump*row+n]-=A[col+jump*row]; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n1]; } __global__ void kernel4_sq(float* A,int jump,int n1,float* B,int jump1,float* C,int jump2,float* D,int jump3,int n2) { int col=blockIdx.x * blockDim.x+threadIdx.x,row; if(2*blockIdx.y<gridDim.y) { row=blockIdx.y * blockDim.y + threadIdx.y; A[col+jump*row]+=B[col+jump1*row]; A[col+jump*row+n1]+=B[col+jump1*row]; B[col+jump1*row]=0; } else { row=(blockIdx.y-gridDim.y/2) * blockDim.y + threadIdx.y; C[col+jump2*row]=D[col+jump3*row+n2]-D[col+jump3*row]; } } __global__ void kernel6_sq(float* A,int jump,float* B,int jump1,int n1,float* C,int jump2,float* D,int jump3,int n2) { int col=blockIdx.x*blockDim.x+threadIdx.x; if(3*blockIdx.y<gridDim.y) { int row=blockIdx.y*blockDim.y+threadIdx.y; A[col+row*jump]=0; } else if(3*blockIdx.y<2*gridDim.y) { int row=(blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y; B[col+row*jump1]-=B[col+row*jump1+n1]; } else { int row=(blockIdx.y-(2*gridDim.y/3)) * blockDim.y + threadIdx.y; C[col+jump2*row]=D[col+jump3*row]-D[col+jump3*row+n2]; } } void strassen_sq(float* A,int jump,float* B,int jump1,float* C,int jump2,int n,float* temp1,float* temp2,int block_len) { if(n<=threshold) hipLaunchKernelGGL(( multiply), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, A,B,C,jump,jump1,jump2,n/xthread); else { n/=2; block_len/=2; hipLaunchKernelGGL(( kernel1_sq), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, A,jump,jump*n+n,B,jump1,jump1*n+n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C,jump2,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel7_sq), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, A+jump*n,jump,-jump*n,B,jump1,n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C+jump2*n+n,jump2,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel7_sq), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, A+n,jump,jump*n,B+jump1*n,jump1,n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C+n,jump2,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel2_sq), dim3(dim3(block_len,3*block_len)),dim3(dim3(xthread,xthread)), 0, 0, C,jump2,n,jump2*n+n,temp1,n,A+jump*n,jump,n); strassen_sq(temp1,n,B,jump1,C+jump2*n,jump2,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel3_sq), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, C+jump2*n,jump2,n,temp1,n,A,jump,n); strassen_sq(temp1,n,B+jump1*n+n,jump1,C+n,jump2,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel6_sq), dim3(dim3(block_len,3*block_len)),dim3(dim3(xthread,xthread)), 0, 0, temp1,n,C,jump2,n,temp2,n,B+n,jump1,jump1*n); 
strassen_sq(A,jump,temp2,n,temp1,n,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel4_sq), dim3(dim3(block_len,2*block_len)),dim3(dim3(xthread,xthread)), 0, 0, C+n,jump2,jump2*n,temp1,n,temp2,n,B,jump1,jump1*n); strassen_sq(A+jump*n+n,jump,temp2,n,temp1,n,n,temp1+n*n,temp2+n*n,block_len); hipLaunchKernelGGL(( kernel5), dim3(dim3(block_len,block_len)),dim3(dim3(xthread,xthread)), 0, 0, C,jump2,jump2*n,temp1,n); } } int nearest_ideal(int &n,int &temp) { int temp1=(xthread-n%xthread)%xthread; int pow=1; n+=temp1; int m=n/xthread; while(m>threshold/xthread) { if(m%2==1) { temp+=pow; m++; } m/=2; pow*=2; } n+=temp*xthread; temp=temp*xthread; temp+=temp1; return pow; } int main(int argc,char** argv) { std::ifstream in(argv[1]); std::ifstream in1(argv[2]); std::ofstream out(argv[3]); float *A,*B,*C; int n1,n2,n3; int temp1=0,temp2=0,temp3=0; in>>n1>>n2; in1>>n2>>n3; out<<n1<<'\t'<<n3<<'\n'; int power=nearest_ideal(n1,temp1); power=::min(power,nearest_ideal(n2,temp2)); power=::min(power,nearest_ideal(n3,temp3)); float factor=0; for(int i=power;i>1;i/=2) factor+=1/(float)(i*i); A=new float[n1*n2]; B=new float[n2*n3]; C=new float[n1*n3]; for(int i=0; i<n1-temp1; i++) { for(int j=0; j<n2-temp2; j++) in>>A[i*n2+j]; for(int j=n2-temp2;j<n2;j++) A[i*n2+j]=0; } for(int i=(n1-temp1)*n2;i<n1*n2;i++) A[i]=0; in.close(); for(int i=0; i<n2-temp2; i++) { for(int j=0; j<n3-temp3; j++) in1>>B[i*n3+j]; for(int j=n3-temp3;j<n3;j++) B[i*n3+j]=0; } for(int i=(n2-temp2)*n3;i<n2*n3;i++) B[i]=0; in1.close(); struct timespec start,end; int size_temp1,size_temp2,n_,n; n=n2>n3?n2:n3; n_=n1>n2?n1:n2; size_temp1=(int)(n1*n_)*factor; size_temp2=(int)(n2*n3)*factor; float *d_A, *d_B, *d_C,*temp1_,*temp2_; hipMalloc( (void **) &d_A, sizeof(float)*n1*n2); hipMalloc( (void **) &d_B, sizeof(float)*n2*n3); hipMalloc( (void **) &d_C, sizeof(float)*n1*n3); hipMalloc( (void **) &temp1_,sizeof(float)*size_temp1); hipMalloc( (void **) &temp2_,sizeof(float)*size_temp2); //copy from host to device hipMemcpy (d_A, A, sizeof(float)*n1*n2, hipMemcpyHostToDevice); hipMemcpy (d_B, B, sizeof(float)*n2*n3, hipMemcpyHostToDevice); hipMemset(d_C,0,sizeof(float)*n1*n3); hipMemset(temp1_,0,sizeof(float)*size_temp1); hipMemset(temp2_,0,sizeof(float)*size_temp2); clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&start); if(n1!=n2 || n3!=n2) strassen(d_A,n2,d_B,n3,d_C,n3,n1,n2,n3,temp1_,temp2_,n,n_); else strassen_sq(d_A,n1,d_B,n1,d_C,n1,n1,temp1_,temp2_,n1/xthread); hipDeviceSynchronize(); clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&end); hipMemcpy (C, d_C, sizeof(float)*n1*n3, hipMemcpyDeviceToHost); printf("Error %s \n",hipGetErrorString(hipGetLastError())); double time_taken = (end.tv_nsec-start.tv_nsec)+1e+9*(end.tv_sec-start.tv_sec); printf("StrassenRec - Time taken: %f\n",time_taken); for(int i=0;i<n1-temp1;i++) { for(int j=0;j<n3-temp3;j++) out<<C[i*n3+j]<<'\t'; out<<'\n'; } std::ofstream ofile; ofile.open(argv[4],std::ios_base::app); ofile<<"strassenRec - Time taken (ns): "<<time_taken<<"\n"; ofile.close(); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipFree(temp1_); hipFree(temp2_); delete C,A,B; }
5fe218b49c88f18f8741e89f5eb2dd7cd9dc90ce.cu
#include<cstdio> #include<fstream> #include<cmath> #include<cuda.h> int threshold=256; int xthread=32; __global__ void multiply(float* A,float* B,float* C,int jump,int jump1,int jump2,int iter) { __shared__ float A1[32][32],B1[32][32]; int posy=blockIdx.y*blockDim.y+threadIdx.y; int posx=blockIdx.x*blockDim.x+threadIdx.x; int row=posy*jump+threadIdx.x; int col=posx+threadIdx.y*jump1; int place=posy*jump2+posx; for(int i=0;i<iter;i++) { A1[threadIdx.y][threadIdx.x]=A[row]; B1[threadIdx.y][threadIdx.x]=B[col]; __syncthreads(); for(int i=0;i<blockDim.x;i++) C[place]+=A1[threadIdx.y][i]*B1[i][threadIdx.x]; row+=blockDim.x; col+=blockDim.y*jump1; __syncthreads(); } } //non_square matrix multiplication __global__ void kernel1(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,int jump3,float* D,int jump4,int m1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; if(col<m2 && row<m1) C[col+jump3*row]=A[col+jump1*row]+A[n1+col+jump1*row]; if(col<m3 && row<m2) D[col+jump4*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel7(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,int jump3,float* D,int jump4,int m1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; if(col<m2 && row<m1) C[col+jump3*row]=A[col+jump1*row]-A[n1+col+jump1*row]; if(col<m3 && row<m2) D[col+jump4*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel2(float* A,int jump,int n1,int n2,float* B,int jump1,float* C,int jump2,int n3,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; if(col<m3) { if(3*blockIdx.y<gridDim.y) { int point=col+(blockIdx.y * blockDim.y + threadIdx.y)*jump; A[point+n2]+=A[point]; } else if(3*blockIdx.y<2*gridDim.y) { int point=col+((blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y)*jump; A[point]+=A[point+n1]; A[point+n1]=0; } } if(col<m2 && 3*blockIdx.y>=2*gridDim.y) { int row=(blockIdx.y-2*gridDim.y/3) * blockDim.y + threadIdx.y; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n3]; } } __global__ void kernel3(float* A,int jump,int n,float* B,int jump1,float* C,int jump2,int n1,int m2,int m3) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y + threadIdx.y; if(col<m2) A[col+jump*row+n]-=A[col+jump*row]; if(col<m3) B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n1]; } __global__ void kernel4(float* A,int jump,int n1,float* B,int jump1,float* C,int jump2,float* D,int jump3,int n2,int m1,int m2) { int col=blockIdx.x * blockDim.x+threadIdx.x,row; if(2*blockIdx.y<gridDim.y) { row=blockIdx.y * blockDim.y + threadIdx.y; if(row<m1) { A[col+jump*row]+=B[col+jump1*row]; A[col+jump*row+n1]+=B[col+jump1*row]; B[col+jump1*row]=0; } } else { row=(blockIdx.y-gridDim.y/2) * blockDim.y + threadIdx.y; if(row<m2) C[col+jump2*row]=D[col+jump3*row+n2]-D[col+jump3*row]; } } __global__ void kernel5(float* A,int jump,int n1,float* B,int jump1) { int point=(blockIdx.x * blockDim.x+threadIdx.x)+(blockIdx.y * blockDim.y + threadIdx.y)*jump; int point1=(blockIdx.x * blockDim.x+threadIdx.x)+(blockIdx.y * blockDim.y + threadIdx.y)*jump1; A[point]+=B[point1]; A[point+n1]+=B[point1]; } __global__ void kernel6(float* A,int jump,float* B,int jump1,int n1,float* C,int jump2,float* D,int jump3,int n2,int m1,int m2,int m3) { int col=blockIdx.x*blockDim.x+threadIdx.x; if(col<m2 && 3*blockIdx.y<gridDim.y) { int row=blockIdx.y*blockDim.y+threadIdx.y; if(row<m1) A[col+row*jump]=0; } if(col<m3 && 3*blockIdx.y>=gridDim.y && 
3*blockIdx.y<2*gridDim.y) { int row=(blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y; if(row<m1) B[col+row*jump1]-=B[col+row*jump1+n1]; } if(col<m3 && 3*blockIdx.y>2*gridDim.y) { int row=(blockIdx.y-(2*gridDim.y/3)) * blockDim.y + threadIdx.y; if(row<m2) C[col+jump2*row]=D[col+jump3*row]-D[col+jump3*row+n2]; } } void strassen(float* A,int jump,float* B,int jump1,float* C,int jump2,int n1,int n2,int n3,float* temp1,float* temp2,int n,int n_) { if(n1<=threshold || n2<=threshold || n3<=threshold) { multiply <<<dim3(n3/xthread,n1/xthread),dim3(xthread,xthread)>>> (A,B,C,jump,jump1,jump2,n2/xthread); } else { n1/=2;n2/=2;n3/=2; n/=2;n_/=2; //M1 kernel1 <<<dim3(n_/xthread,n/xthread),dim3(xthread,xthread)>>> (A,jump,jump*n1+n2,B,jump1,jump1*n2+n3,temp1,n2,temp2,n3,n1,n2,n3); //temp1=A11+A22 //temp2=B11+B22 strassen(temp1,n2,temp2,n3,C,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C11=temp1*temp2 //M6 kernel7 <<<dim3(n_/xthread,n/xthread),dim3(xthread,xthread)>>> (A+jump*n1,jump,-jump*n1,B,jump1,n3,temp1,n2,temp2,n3,n1,n2,n3); //temp1=A21-A11//temp2=B11+B12 strassen(temp1,n2,temp2,n3,C+jump2*n1+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C22=temp1*temp2 //M7 kernel7 <<<dim3(n_/xthread,n/xthread),dim3(xthread,xthread)>>> (A+n2,jump,jump*n1,B+jump1*n2,jump1,n3,temp1,n2,temp2,n3,n1,n2,n3);//temp1=A12-A22 //temp2=B21+B22 strassen(temp1,n2,temp2,n3,C+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C12+=temp1*temp2 kernel2 <<<dim3(n_/xthread,(3*n1)/xthread),dim3(xthread,xthread)>>> (C,jump2,n3,jump2*n1+n3,temp1,n2,A+jump*n1,jump,n2,n2,n3); //C22+=C11//C11+=C12 //temp1=A21+A22 //C12=0 //M2 strassen(temp1,n2,B,jump1,C+jump2*n1,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C21=temp1*B11 kernel3 <<<dim3(n_/xthread,n1/xthread),dim3(xthread,xthread)>>> (C+jump2*n1,jump2,n3,temp1,n2,A,jump,n2,n2,n3); //C22-=C21 //temp1=A11+A12 //M5 strassen(temp1,n2,B+jump1*n2+n3,jump1,C+n3,jump2,n1,n2,n3,temp1+n1*n2,temp2+n2*n3,n,n_); //C12=temp1*B22 kernel6<<<dim3(n_/xthread,(3*n)/xthread),dim3(xthread,xthread)>>>(temp1,n2,C,jump2,n3,temp2,n3,B+n3,jump1,jump1*n2,n1,n2,n3); //C11-=C12 //temp2=B12-B22 //temp1=0 //M3 strassen(A,jump,temp2,n3,temp1,n3,n1,n2,n3,temp1+n1*n3,temp2+n2*n3,n,n_); //temp1=A11*temp2 kernel4 <<<dim3(n3/xthread,(2*n)/xthread),dim3(xthread,xthread)>>> (C+n3,jump2,jump2*n1,temp1,n2,temp2,n3,B,jump1,jump1*n2,n1,n2); //C12=C12+temp1//C22=C22+temp1 //temp2=B21-B11 //temp1=0 //M4 strassen(A+jump*n1+n2,jump,temp2,n3,temp1,n3,n1,n2,n3,temp1+n1*n3,temp2+n2*n3,n,n_); //temp1=A22*temp2 kernel5 <<<dim3(n3/xthread,n1/xthread),dim3(xthread,xthread)>>> (C,jump2,jump2*n1,temp1,n2); //C21=C21+temp1//C11=C11+temp1 } } //Square matrix multiplication __global__ void kernel1_sq(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,float* D,int jump) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; C[col+jump*row]=A[col+jump1*row]+A[n1+col+jump1*row]; D[col+jump*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel7_sq(float* A,int jump1,int n1,float* B,int jump2,int n2,float* C,float* D,int jump) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y+threadIdx.y; C[col+jump*row]=A[col+jump1*row]-A[n1+col+jump1*row]; D[col+jump*row]=B[col+jump2*row]+B[n2+col+jump2*row]; } __global__ void kernel2_sq(float* A,int jump,int n1,int n2,float* B,int jump1,float* C,int jump2,int n3) { int col=blockIdx.x * blockDim.x+threadIdx.x; if(3*blockIdx.y<gridDim.y) { int point=col+(blockIdx.y * blockDim.y + threadIdx.y)*jump; 
A[point+n2]+=A[point]; } else if(3*blockIdx.y<2*gridDim.y) { int point=col+((blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y)*jump; A[point]+=A[point+n1]; A[point+n1]=0; } else { int row=(blockIdx.y-2*gridDim.y/3) * blockDim.y + threadIdx.y; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n3]; } } __global__ void kernel3_sq(float* A,int jump,int n,float* B,int jump1,float* C,int jump2,int n1) { int col=blockIdx.x * blockDim.x+threadIdx.x; int row=blockIdx.y * blockDim.y + threadIdx.y; A[col+jump*row+n]-=A[col+jump*row]; B[col+jump1*row]=C[col+jump2*row]+C[col+jump2*row+n1]; } __global__ void kernel4_sq(float* A,int jump,int n1,float* B,int jump1,float* C,int jump2,float* D,int jump3,int n2) { int col=blockIdx.x * blockDim.x+threadIdx.x,row; if(2*blockIdx.y<gridDim.y) { row=blockIdx.y * blockDim.y + threadIdx.y; A[col+jump*row]+=B[col+jump1*row]; A[col+jump*row+n1]+=B[col+jump1*row]; B[col+jump1*row]=0; } else { row=(blockIdx.y-gridDim.y/2) * blockDim.y + threadIdx.y; C[col+jump2*row]=D[col+jump3*row+n2]-D[col+jump3*row]; } } __global__ void kernel6_sq(float* A,int jump,float* B,int jump1,int n1,float* C,int jump2,float* D,int jump3,int n2) { int col=blockIdx.x*blockDim.x+threadIdx.x; if(3*blockIdx.y<gridDim.y) { int row=blockIdx.y*blockDim.y+threadIdx.y; A[col+row*jump]=0; } else if(3*blockIdx.y<2*gridDim.y) { int row=(blockIdx.y-gridDim.y/3) * blockDim.y + threadIdx.y; B[col+row*jump1]-=B[col+row*jump1+n1]; } else { int row=(blockIdx.y-(2*gridDim.y/3)) * blockDim.y + threadIdx.y; C[col+jump2*row]=D[col+jump3*row]-D[col+jump3*row+n2]; } } void strassen_sq(float* A,int jump,float* B,int jump1,float* C,int jump2,int n,float* temp1,float* temp2,int block_len) { if(n<=threshold) multiply<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(A,B,C,jump,jump1,jump2,n/xthread); else { n/=2; block_len/=2; kernel1_sq<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(A,jump,jump*n+n,B,jump1,jump1*n+n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C,jump2,n,temp1+n*n,temp2+n*n,block_len); kernel7_sq<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(A+jump*n,jump,-jump*n,B,jump1,n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C+jump2*n+n,jump2,n,temp1+n*n,temp2+n*n,block_len); kernel7_sq<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(A+n,jump,jump*n,B+jump1*n,jump1,n,temp1,temp2,n); strassen_sq(temp1,n,temp2,n,C+n,jump2,n,temp1+n*n,temp2+n*n,block_len); kernel2_sq<<<dim3(block_len,3*block_len),dim3(xthread,xthread)>>>(C,jump2,n,jump2*n+n,temp1,n,A+jump*n,jump,n); strassen_sq(temp1,n,B,jump1,C+jump2*n,jump2,n,temp1+n*n,temp2+n*n,block_len); kernel3_sq<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(C+jump2*n,jump2,n,temp1,n,A,jump,n); strassen_sq(temp1,n,B+jump1*n+n,jump1,C+n,jump2,n,temp1+n*n,temp2+n*n,block_len); kernel6_sq<<<dim3(block_len,3*block_len),dim3(xthread,xthread)>>>(temp1,n,C,jump2,n,temp2,n,B+n,jump1,jump1*n); strassen_sq(A,jump,temp2,n,temp1,n,n,temp1+n*n,temp2+n*n,block_len); kernel4_sq<<<dim3(block_len,2*block_len),dim3(xthread,xthread)>>>(C+n,jump2,jump2*n,temp1,n,temp2,n,B,jump1,jump1*n); strassen_sq(A+jump*n+n,jump,temp2,n,temp1,n,n,temp1+n*n,temp2+n*n,block_len); kernel5<<<dim3(block_len,block_len),dim3(xthread,xthread)>>>(C,jump2,jump2*n,temp1,n); } } int nearest_ideal(int &n,int &temp) { int temp1=(xthread-n%xthread)%xthread; int pow=1; n+=temp1; int m=n/xthread; while(m>threshold/xthread) { if(m%2==1) { temp+=pow; m++; } m/=2; pow*=2; } n+=temp*xthread; temp=temp*xthread; temp+=temp1; return pow; } int main(int argc,char** argv) { std::ifstream in(argv[1]); 
std::ifstream in1(argv[2]); std::ofstream out(argv[3]); float *A,*B,*C; int n1,n2,n3; int temp1=0,temp2=0,temp3=0; in>>n1>>n2; in1>>n2>>n3; out<<n1<<'\t'<<n3<<'\n'; int power=nearest_ideal(n1,temp1); power=std::min(power,nearest_ideal(n2,temp2)); power=std::min(power,nearest_ideal(n3,temp3)); float factor=0; for(int i=power;i>1;i/=2) factor+=1/(float)(i*i); A=new float[n1*n2]; B=new float[n2*n3]; C=new float[n1*n3]; for(int i=0; i<n1-temp1; i++) { for(int j=0; j<n2-temp2; j++) in>>A[i*n2+j]; for(int j=n2-temp2;j<n2;j++) A[i*n2+j]=0; } for(int i=(n1-temp1)*n2;i<n1*n2;i++) A[i]=0; in.close(); for(int i=0; i<n2-temp2; i++) { for(int j=0; j<n3-temp3; j++) in1>>B[i*n3+j]; for(int j=n3-temp3;j<n3;j++) B[i*n3+j]=0; } for(int i=(n2-temp2)*n3;i<n2*n3;i++) B[i]=0; in1.close(); struct timespec start,end; int size_temp1,size_temp2,n_,n; n=n2>n3?n2:n3; n_=n1>n2?n1:n2; size_temp1=(int)(n1*n_)*factor; size_temp2=(int)(n2*n3)*factor; float *d_A, *d_B, *d_C,*temp1_,*temp2_; cudaMalloc( (void **) &d_A, sizeof(float)*n1*n2); cudaMalloc( (void **) &d_B, sizeof(float)*n2*n3); cudaMalloc( (void **) &d_C, sizeof(float)*n1*n3); cudaMalloc( (void **) &temp1_,sizeof(float)*size_temp1); cudaMalloc( (void **) &temp2_,sizeof(float)*size_temp2); //copy from host to device cudaMemcpy (d_A, A, sizeof(float)*n1*n2, cudaMemcpyHostToDevice); cudaMemcpy (d_B, B, sizeof(float)*n2*n3, cudaMemcpyHostToDevice); cudaMemset(d_C,0,sizeof(float)*n1*n3); cudaMemset(temp1_,0,sizeof(float)*size_temp1); cudaMemset(temp2_,0,sizeof(float)*size_temp2); clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&start); if(n1!=n2 || n3!=n2) strassen(d_A,n2,d_B,n3,d_C,n3,n1,n2,n3,temp1_,temp2_,n,n_); else strassen_sq(d_A,n1,d_B,n1,d_C,n1,n1,temp1_,temp2_,n1/xthread); cudaDeviceSynchronize(); clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&end); cudaMemcpy (C, d_C, sizeof(float)*n1*n3, cudaMemcpyDeviceToHost); printf("Error %s \n",cudaGetErrorString(cudaGetLastError())); double time_taken = (end.tv_nsec-start.tv_nsec)+1e+9*(end.tv_sec-start.tv_sec); printf("StrassenRec - Time taken: %f\n",time_taken); for(int i=0;i<n1-temp1;i++) { for(int j=0;j<n3-temp3;j++) out<<C[i*n3+j]<<'\t'; out<<'\n'; } std::ofstream ofile; ofile.open(argv[4],std::ios_base::app); ofile<<"strassenRec - Time taken (ns): "<<time_taken<<"\n"; ofile.close(); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(temp1_); cudaFree(temp2_); delete C,A,B; }
7c8fa6041f6a9e1d86cfbb1a793cb533fc62292f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2014 Nervana Systems Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// nvcc -arch sm_52 -cubin hconv_updat_C64_K64.cu

extern "C"
__global__ void __launch_bounds__(64) hconv_updat_C64_K64
(
    short* param_Rand,
    short* param_F,
    const short* param_I,
    const short* param_E,
    float param_alpha,
    int param_flags,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_CRST,
    int param_RST,
    int param_magic_RST,
    int param_shift_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_P,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ,
    int param_part_P,
    int param_part_Q,
    int param_part_PQ
)
{
    __shared__ float share[64*8*4 + 6];

    int tid = threadIdx.x;

    share[tid] = 1;
    *param_F = share[63-tid];
}
7c8fa6041f6a9e1d86cfbb1a793cb533fc62292f.cu
/*
 * Copyright 2014 Nervana Systems Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// nvcc -arch sm_52 -cubin hconv_updat_C64_K64.cu

extern "C"
__global__ void __launch_bounds__(64) hconv_updat_C64_K64
(
    short* param_Rand,
    short* param_F,
    const short* param_I,
    const short* param_E,
    float param_alpha,
    int param_flags,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_CRST,
    int param_RST,
    int param_magic_RST,
    int param_shift_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_P,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ,
    int param_part_P,
    int param_part_Q,
    int param_part_PQ
)
{
    __shared__ float share[64*8*4 + 6];

    int tid = threadIdx.x;

    share[tid] = 1;
    *param_F = share[63-tid];
}
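The pair of files above mainly differs in the runtime header and in how the kernel would be launched: hipify rewrites the CUDA triple-chevron launch into a hipLaunchKernelGGL call whose first five arguments are the kernel, grid, block, shared-memory bytes and stream. A minimal, self-contained CUDA sketch of that mapping follows; the `scale` kernel and its sizes are hypothetical and are not taken from the files above.

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical kernel, used only to illustrate the launch syntax that hipify rewrites.
__global__ void scale(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc((void**)&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    // CUDA form: kernel<<<grid, block, sharedMemBytes, stream>>>(args...)
    // hipify-generated form (as seen in the .hip records of this dataset):
    //   hipLaunchKernelGGL(scale, dim3(n / 256), dim3(256), 0, 0, d_data, 2.0f, n);
    scale<<<n / 256, 256, 0, 0>>>(d_data, 2.0f, n);

    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_data);
    return 0;
}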
6fbd0b644d7f8fbd3d64890d10e72a7eb2223a8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2009, Jiri Matela * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <unistd.h> #include <error.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <assert.h> #include "components.h" #include "common.h" #define THREADS 256 /* Store 3 RGB float components */ __device__ void storeComponents(float *d_r, float *d_g, float *d_b, float r, float g, float b, int pos) { d_r[pos] = (r/255.0f) - 0.5f; d_g[pos] = (g/255.0f) - 0.5f; d_b[pos] = (b/255.0f) - 0.5f; } /* Store 3 RGB intege components */ __device__ void storeComponents(int *d_r, int *d_g, int *d_b, int r, int g, int b, int pos) { d_r[pos] = r - 128; d_g[pos] = g - 128; d_b[pos] = b - 128; } /* Store float component */ __device__ void storeComponent(float *d_c, float c, int pos) { d_c[pos] = (c/255.0f) - 0.5f; } /* Store integer component */ __device__ void storeComponent(int *d_c, int c, int pos) { d_c[pos] = c - 128; } /* Copy img src data into three separated component buffers */ template<typename T> __global__ void c_CopySrcToComponents(T *d_r, T *d_g, T *d_b, unsigned char * d_src, int pixels) { int x = threadIdx.x; int gX = blockDim.x*blockIdx.x; __shared__ unsigned char sData[THREADS*3]; /* Copy data to shared mem by 4bytes other checks are not necessary, since d_src buffer is aligned to sharedDataSize */ if ( (x*4) < THREADS*3 ) { float *s = (float *)d_src; float *d = (float *)sData; d[x] = s[((gX*3)>>2) + x]; } __syncthreads(); T r, g, b; int offset = x*3; r = (T)(sData[offset]); g = (T)(sData[offset+1]); b = (T)(sData[offset+2]); int globalOutputPosition = gX + x; if (globalOutputPosition < pixels) { storeComponents(d_r, d_g, d_b, r, g, b, globalOutputPosition); } } /* Copy img src data into three separated component buffers */ template<typename T> __global__ void c_CopySrcToComponent(T *d_c, unsigned char * d_src, int pixels) { int x = threadIdx.x; int gX = blockDim.x*blockIdx.x; __shared__ unsigned char sData[THREADS]; /* Copy data to shared mem by 4bytes other checks are not necessary, since d_src buffer is aligned to sharedDataSize */ if ( (x*4) < THREADS) { float *s = (float *)d_src; float *d = (float *)sData; d[x] = s[(gX>>2) + x]; } __syncthreads(); T c; c = 
(T)(sData[x]); int globalOutputPosition = gX + x; if (globalOutputPosition < pixels) { storeComponent(d_c, c, globalOutputPosition); } } /* Separate compoents of 8bit RGB source image */ template<typename T> void rgbToComponents(T *d_r, T *d_g, T *d_b, unsigned char * src, int width, int height) { unsigned char * d_src; int pixels = width*height; int alignedSize = DIVANDRND(width*height, THREADS) * THREADS * 3; //aligned to thread block size -- THREADS /* Alloc d_src buffer */ hipMalloc((void **)&d_src, alignedSize); hipMemset(d_src, 0, alignedSize); /* Copy data to device */ hipMemcpy(d_src, src, pixels*3, hipMemcpyHostToDevice); /* Kernel */ dim3 threads(THREADS); dim3 grid(alignedSize/(THREADS*3)); assert(alignedSize%(THREADS*3) == 0); hipLaunchKernelGGL(( c_CopySrcToComponents), dim3(grid), dim3(threads), 0, 0, d_r, d_g, d_b, d_src, pixels); /* Free Memory */ hipFree(d_src); } template void rgbToComponents<float>(float *d_r, float *d_g, float *d_b, unsigned char * src, int width, int height); template void rgbToComponents<int>(int *d_r, int *d_g, int *d_b, unsigned char * src, int width, int height); /* Copy a 8bit source image data into a color compoment of type T */ template<typename T> void bwToComponent(T *d_c, unsigned char * src, int width, int height) { unsigned char * d_src; int pixels = width*height; int alignedSize = DIVANDRND(pixels, THREADS) * THREADS; //aligned to thread block size -- THREADS /* Alloc d_src buffer */ hipMalloc((void **)&d_src, alignedSize); hipMemset(d_src, 0, alignedSize); /* Copy data to device */ hipMemcpy(d_src, src, pixels, hipMemcpyHostToDevice); /* Kernel */ dim3 threads(THREADS); dim3 grid(alignedSize/(THREADS)); assert(alignedSize%(THREADS) == 0); hipLaunchKernelGGL(( c_CopySrcToComponent), dim3(grid), dim3(threads), 0, 0, d_c, d_src, pixels); /* Free Memory */ hipFree(d_src); } template void bwToComponent<float>(float *d_c, unsigned char *src, int width, int height); template void bwToComponent<int>(int *d_c, unsigned char *src, int width, int height);
6fbd0b644d7f8fbd3d64890d10e72a7eb2223a8b.cu
/* * Copyright (c) 2009, Jiri Matela * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <unistd.h> #include <error.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <assert.h> #include "components.h" #include "common.h" #define THREADS 256 /* Store 3 RGB float components */ __device__ void storeComponents(float *d_r, float *d_g, float *d_b, float r, float g, float b, int pos) { d_r[pos] = (r/255.0f) - 0.5f; d_g[pos] = (g/255.0f) - 0.5f; d_b[pos] = (b/255.0f) - 0.5f; } /* Store 3 RGB intege components */ __device__ void storeComponents(int *d_r, int *d_g, int *d_b, int r, int g, int b, int pos) { d_r[pos] = r - 128; d_g[pos] = g - 128; d_b[pos] = b - 128; } /* Store float component */ __device__ void storeComponent(float *d_c, float c, int pos) { d_c[pos] = (c/255.0f) - 0.5f; } /* Store integer component */ __device__ void storeComponent(int *d_c, int c, int pos) { d_c[pos] = c - 128; } /* Copy img src data into three separated component buffers */ template<typename T> __global__ void c_CopySrcToComponents(T *d_r, T *d_g, T *d_b, unsigned char * d_src, int pixels) { int x = threadIdx.x; int gX = blockDim.x*blockIdx.x; __shared__ unsigned char sData[THREADS*3]; /* Copy data to shared mem by 4bytes other checks are not necessary, since d_src buffer is aligned to sharedDataSize */ if ( (x*4) < THREADS*3 ) { float *s = (float *)d_src; float *d = (float *)sData; d[x] = s[((gX*3)>>2) + x]; } __syncthreads(); T r, g, b; int offset = x*3; r = (T)(sData[offset]); g = (T)(sData[offset+1]); b = (T)(sData[offset+2]); int globalOutputPosition = gX + x; if (globalOutputPosition < pixels) { storeComponents(d_r, d_g, d_b, r, g, b, globalOutputPosition); } } /* Copy img src data into three separated component buffers */ template<typename T> __global__ void c_CopySrcToComponent(T *d_c, unsigned char * d_src, int pixels) { int x = threadIdx.x; int gX = blockDim.x*blockIdx.x; __shared__ unsigned char sData[THREADS]; /* Copy data to shared mem by 4bytes other checks are not necessary, since d_src buffer is aligned to sharedDataSize */ if ( (x*4) < THREADS) { float *s = (float *)d_src; float *d = (float *)sData; d[x] = s[(gX>>2) + x]; } __syncthreads(); T c; c = (T)(sData[x]); int globalOutputPosition = gX + x; if (globalOutputPosition < pixels) { 
storeComponent(d_c, c, globalOutputPosition); } } /* Separate compoents of 8bit RGB source image */ template<typename T> void rgbToComponents(T *d_r, T *d_g, T *d_b, unsigned char * src, int width, int height) { unsigned char * d_src; int pixels = width*height; int alignedSize = DIVANDRND(width*height, THREADS) * THREADS * 3; //aligned to thread block size -- THREADS /* Alloc d_src buffer */ cudaMalloc((void **)&d_src, alignedSize); cudaMemset(d_src, 0, alignedSize); /* Copy data to device */ cudaMemcpy(d_src, src, pixels*3, cudaMemcpyHostToDevice); /* Kernel */ dim3 threads(THREADS); dim3 grid(alignedSize/(THREADS*3)); assert(alignedSize%(THREADS*3) == 0); c_CopySrcToComponents<<<grid, threads>>>(d_r, d_g, d_b, d_src, pixels); /* Free Memory */ cudaFree(d_src); } template void rgbToComponents<float>(float *d_r, float *d_g, float *d_b, unsigned char * src, int width, int height); template void rgbToComponents<int>(int *d_r, int *d_g, int *d_b, unsigned char * src, int width, int height); /* Copy a 8bit source image data into a color compoment of type T */ template<typename T> void bwToComponent(T *d_c, unsigned char * src, int width, int height) { unsigned char * d_src; int pixels = width*height; int alignedSize = DIVANDRND(pixels, THREADS) * THREADS; //aligned to thread block size -- THREADS /* Alloc d_src buffer */ cudaMalloc((void **)&d_src, alignedSize); cudaMemset(d_src, 0, alignedSize); /* Copy data to device */ cudaMemcpy(d_src, src, pixels, cudaMemcpyHostToDevice); /* Kernel */ dim3 threads(THREADS); dim3 grid(alignedSize/(THREADS)); assert(alignedSize%(THREADS) == 0); c_CopySrcToComponent<<<grid, threads>>>(d_c, d_src, pixels); /* Free Memory */ cudaFree(d_src); } template void bwToComponent<float>(float *d_c, unsigned char *src, int width, int height); template void bwToComponent<int>(int *d_c, unsigned char *src, int width, int height);
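Both versions of the component-splitting file size their staging buffer with DIVANDRND(pixels, THREADS) * THREADS (times 3 for RGB) so the 4-byte shared-memory loads never read past the allocation and the grid divides evenly. A small stand-alone sketch of that rounding logic follows; `div_round_up` is a local stand-in with the behaviour DIVANDRND from common.h is assumed to have.

#include <cstdio>

// Stand-in for DIVANDRND(a, b): integer division rounded up (assumed behaviour).
static int div_round_up(int a, int b) { return (a + b - 1) / b; }

int main()
{
    const int threads = 256;               // THREADS in the file above
    const int width = 1919, height = 1081; // pixel count deliberately not a multiple of 256
    const int pixels = width * height;

    // RGB path: 3 bytes per pixel, padded up to whole thread blocks.
    const int alignedRGB = div_round_up(pixels, threads) * threads * 3;
    // Grayscale path: 1 byte per pixel.
    const int alignedBW = div_round_up(pixels, threads) * threads;

    printf("pixels=%d alignedRGB=%d (grid=%d) alignedBW=%d (grid=%d)\n",
           pixels, alignedRGB, alignedRGB / (threads * 3),
           alignedBW, alignedBW / threads);
    return 0;
}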
37cb9f161befafa1b655c775a424955845c29913.hip
// !!! This is a file automatically generated by hipify!!! // // CUDA implementation of Total Variation Filter // #include "opencv2/imgproc/imgproc.hpp" #include <opencv2/highgui.hpp> #include <iostream> #include <string> #include <stdio.h> #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #define BLOCK_SIZE 16 #define FILTER_WIDTH 3 #define FILTER_HEIGHT 3 using namespace std; // Run Total Variation Filter on GPU __global__ void tvFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // only threads inside image will write results if((x>=FILTER_WIDTH/2) && (x<(width-FILTER_WIDTH/2)) && (y>=FILTER_HEIGHT/2) && (y<(height-FILTER_HEIGHT/2))) { float sod = 0; // Loop inside the filter to average pixel values for(int ky=-FILTER_HEIGHT/2; ky<=FILTER_HEIGHT/2; ky++) { for(int kx=-FILTER_WIDTH/2; kx<=FILTER_WIDTH/2; kx++) { float fl = srcImage[((y+ky)*width + (x+kx))]; float center = srcImage[((y)*width + (x))]; sod += fl-center; } } dstImage[(y*width+x)] = sod; } } // The wrapper is used to call total variation filter extern "C" void tvFilter_GPU_wrapper(const cv::Mat& input, cv::Mat& output) { // Use cuda event to catch time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Calculate number of input & output bytes in each block const int inputSize = input.cols * input.rows; const int outputSize = output.cols * output.rows; unsigned char *d_input, *d_output; // Allocate device memory hipMalloc<unsigned char>(&d_input,inputSize); hipMalloc<unsigned char>(&d_output,outputSize); // Copy data from OpenCV input image to device memory hipMemcpy(d_input,input.ptr(),inputSize,hipMemcpyHostToDevice); // Specify block size const dim3 block(BLOCK_SIZE,BLOCK_SIZE); // Calculate grid size to cover the whole image const dim3 grid((output.cols + block.x - 1)/block.x, (output.rows + block.y - 1)/block.y); // Start time hipEventRecord(start); // Run BoxFilter kernel on CUDA hipLaunchKernelGGL(( tvFilter), dim3(grid),dim3(block), 0, 0, d_input, d_output, output.cols, output.rows); // Stop time hipEventRecord(stop); //Copy data from device memory to output image hipMemcpy(output.ptr(),d_output,outputSize,hipMemcpyDeviceToHost); //Free the device memory hipFree(d_input); hipFree(d_output); hipEventSynchronize(stop); float milliseconds = 0; // Calculate elapsed time in milisecond hipEventElapsedTime(&milliseconds, start, stop); cout<< "\nTotal processing time on GPU (ms): " << milliseconds << "\n"; }
37cb9f161befafa1b655c775a424955845c29913.cu
//
// CUDA implementation of Total Variation Filter
//
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"

#define BLOCK_SIZE 16
#define FILTER_WIDTH 3
#define FILTER_HEIGHT 3

using namespace std;

// Run Total Variation Filter on GPU
__global__ void tvFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;

    // only threads inside image will write results
    if((x>=FILTER_WIDTH/2) && (x<(width-FILTER_WIDTH/2)) && (y>=FILTER_HEIGHT/2) && (y<(height-FILTER_HEIGHT/2)))
    {
        float sod = 0;
        // Loop inside the filter to average pixel values
        for(int ky=-FILTER_HEIGHT/2; ky<=FILTER_HEIGHT/2; ky++) {
            for(int kx=-FILTER_WIDTH/2; kx<=FILTER_WIDTH/2; kx++) {
                float fl = srcImage[((y+ky)*width + (x+kx))];
                float center = srcImage[((y)*width + (x))];
                sod += fl-center;
            }
        }
        dstImage[(y*width+x)] = sod;
    }
}

// The wrapper is used to call total variation filter
extern "C" void tvFilter_GPU_wrapper(const cv::Mat& input, cv::Mat& output)
{
    // Use cuda event to catch time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Calculate number of input & output bytes in each block
    const int inputSize = input.cols * input.rows;
    const int outputSize = output.cols * output.rows;
    unsigned char *d_input, *d_output;

    // Allocate device memory
    cudaMalloc<unsigned char>(&d_input,inputSize);
    cudaMalloc<unsigned char>(&d_output,outputSize);

    // Copy data from OpenCV input image to device memory
    cudaMemcpy(d_input,input.ptr(),inputSize,cudaMemcpyHostToDevice);

    // Specify block size
    const dim3 block(BLOCK_SIZE,BLOCK_SIZE);

    // Calculate grid size to cover the whole image
    const dim3 grid((output.cols + block.x - 1)/block.x, (output.rows + block.y - 1)/block.y);

    // Start time
    cudaEventRecord(start);

    // Run BoxFilter kernel on CUDA
    tvFilter<<<grid,block>>>(d_input, d_output, output.cols, output.rows);

    // Stop time
    cudaEventRecord(stop);

    //Copy data from device memory to output image
    cudaMemcpy(output.ptr(),d_output,outputSize,cudaMemcpyDeviceToHost);

    //Free the device memory
    cudaFree(d_input);
    cudaFree(d_output);

    cudaEventSynchronize(stop);
    float milliseconds = 0;

    // Calculate elapsed time in milisecond
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout<< "\nTotal processing time on GPU (ms): " << milliseconds << "\n";
}
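The wrapper above brackets the kernel launch with a pair of events to measure elapsed GPU time, and hipify maps each cudaEvent* call one-for-one onto hipEvent*. A minimal sketch of that timing pattern in isolation follows; the `busy` kernel is a hypothetical placeholder, not the TV filter itself.

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical placeholder kernel whose execution time is measured.
__global__ void busy(int* out) { *out = 42; }

int main()
{
    int* d_out = nullptr;
    cudaMalloc((void**)&d_out, sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);       // enqueue the "start" marker
    busy<<<1, 1>>>(d_out);        // the work being timed
    cudaEventRecord(stop);        // enqueue the "stop" marker

    cudaEventSynchronize(stop);   // wait until "stop" has actually executed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_out);
    return 0;
}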
efd83836fda51c7d3e9896a49677a630e0de5bb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> template<typename T> static inline void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << hipGetErrorString(err) << " " << func << std::endl; exit(1); } } #define CHECK(x) check(x, #x, __FILE__, __LINE__) // template <class T> // void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) { // CHECK(hipMemcpy(target, source, num * sizeof(T), direction)); // } __global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ // printf("checing len: %p",len); // printf("checing n: %p",n); unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; // int offset=2*step-1; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // int step=len*blockDim.x; // // int offset=2*step-1; // int offset=2*step; // unsigned int start= blockDim.x*step*index+offset; // unsigned int end= blockDim.x*step*(index+1); // for(unsigned int i=start; i<end && i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; if (index==0) return; //the first block is not needed to merge int step=len*blockDim.x; int start=index*step+1; //exclusive // int end=start+step; int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } static inline int divup(int a, int b) { return (a + b - 1)/b; } int main(){ const unsigned int n=100000; //100000 number unsigned int data[n]; unsigned int result[n]; unsigned int inter_sum[n]; unsigned int inter_result[n]; unsigned int *cal_result=new unsigned int [n]; for (unsigned int i=0; i<n; i++){ data[i]=i; } for (unsigned int i=0; i<n; i++){ cal_result[i]=i; } for (long long i=0; i<n; i++){ 
result[i]=(i-1)*i/2; } std::cout<< "data preparation done"<<std::endl; const int block_size=64;//64 threads per block; const int len=1000; // add 1000 prefix sum per thread; unsigned int *d_in=NULL; CHECK(hipMalloc((void**)&d_in,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int))); CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int))); CHECK(hipMemcpy(d_in,data,n * sizeof(unsigned int), hipMemcpyHostToDevice)); // cuda_memcpy(d_in,data,n,hipMemcpyHostToDevice); // std::cout<< divup(n,block_size*len) <<std::endl; // for (long long i=64001; i<65001; i++){ // inter_result[i]=(64000+i-1)*(i-64000)/2; // } hipLaunchKernelGGL(( prefixsum), dim3(divup(n,block_size*len)),dim3(block_size), 0, 0, d_in,d_sum,len,n); // CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); // for (int i=64001; i<65001; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl; // break; // // return 0; // } // } // std::cout<<"pass here"<<std::endl; CHECK(hipGetLastError()); long long start=64001; // int end=start+1000; unsigned int end=100000; // std::cout<< end*end<<std::endl; for (unsigned int i=start; i<end; i++){ // int index=i-64000; inter_result[i]=((start-1+i-1)*(i-start+1))/2; } start=1; end=start+64000; for (long long i=start; i<end; i++){ // int index=i-64000; inter_result[i]=(i-1)*(i-start+1)/2; } // CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); // for (int i=65000; i<66000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // break; // // return 0; // } // } // std::cout<<"pass"<<std::endl; // for (unsigned int i=65000; i<66000; i++){ // inter_result[i]=(65000+i)*(i-65000+1)/2; // } // for (long long i=64001; i<65001; i++){ // inter_result[i]=(64000+i-1)*(i-64000)/2; // } // for (unsigned int i=1001; i<2001; i++){ // inter_result[i]=(1000+i-1)*(i-1000)/2; // } // inter_result[2000]+=result[1000]; hipLaunchKernelGGL(( serialsum_accrossthread), dim3(divup(n,block_size*len*block_size)),dim3(block_size), 0, 0, d_sum,len,n); CHECK(hipGetLastError()); // CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); // for(int i=1001; i<2001; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } // std::cout<<"pass"<<std::endl; // std::cout << "pass first one"<<std::endl; // for (int i=64000; i<65000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } // inter_result[65999]+=inter_result[64999]; // for (int i=65000; i<66000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } hipLaunchKernelGGL(( mergethread), dim3(divup(n,block_size*len)),dim3(block_size), 0, 0, d_sum,len,n); CHECK(hipGetLastError()); // CHECK(hipMemcpy(inter_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); //serial sum CHECK(hipMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); for (int i=64001; i<100000; i++){ if(inter_result[i]!=inter_sum[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl; break; // return 0; } } std::cout<<"pass here 1"<<std::endl; 
serialsum_accrossblock(inter_sum, len, n, block_size); CHECK(hipMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), hipMemcpyHostToDevice)); // CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); for (int i=1; i<100000; i++){ if(inter_result[i]!=inter_sum[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl; break; // return 0; } } std::cout<<"pass here"<<std::endl; // serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n); // CHECK(hipGetLastError()); // CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); // for (int i=0; i<100000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl; // break; // // return 0; // } // } // for (unsigned int i=64000; i<100000; i++){ // inter_result[i]+=inter_result[63999]; // } // std::cout<< divup(n,block_size*len) << std::endl; hipLaunchKernelGGL(( mergeblock), dim3(divup(n,block_size*len*block_size)) ,dim3(block_size), 0, 0, d_sum,len,n); CHECK(hipGetLastError()); // cuda_memcpy(cal_result, d_sum, n, hipMemcpyDeviceToHost); CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)); CHECK(hipFree(d_in)); CHECK(hipFree(d_sum)); //compare unsigned int i; for (i=0; i<n; i++){ if(result[i]!=cal_result[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl; break; } } if(i==n){ std::cout<<"correct"<<std::endl; } return 0; }
efd83836fda51c7d3e9896a49677a630e0de5bb4.cu
#include <iostream> template<typename T> static inline void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << cudaGetErrorString(err) << " " << func << std::endl; exit(1); } } #define CHECK(x) check(x, #x, __FILE__, __LINE__) // template <class T> // void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) { // CHECK(cudaMemcpy(target, source, num * sizeof(T), direction)); // } __global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ // printf("checing len: %p",len); // printf("checing n: %p",n); unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; // int offset=2*step-1; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // int step=len*blockDim.x; // // int offset=2*step-1; // int offset=2*step; // unsigned int start= blockDim.x*step*index+offset; // unsigned int end= blockDim.x*step*(index+1); // for(unsigned int i=start; i<end && i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; if (index==0) return; //the first block is not needed to merge int step=len*blockDim.x; int start=index*step+1; //exclusive // int end=start+step; int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } static inline int divup(int a, int b) { return (a + b - 1)/b; } int main(){ const unsigned int n=100000; //100000 number unsigned int data[n]; unsigned int result[n]; unsigned int inter_sum[n]; unsigned int inter_result[n]; unsigned int *cal_result=new unsigned int [n]; for (unsigned int i=0; i<n; i++){ data[i]=i; } for (unsigned int i=0; i<n; i++){ cal_result[i]=i; } for (long long i=0; i<n; i++){ result[i]=(i-1)*i/2; } std::cout<< "data preparation done"<<std::endl; const int 
block_size=64;//64 threads per block; const int len=1000; // add 1000 prefix sum per thread; unsigned int *d_in=NULL; CHECK(cudaMalloc((void**)&d_in,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int))); CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int))); CHECK(cudaMemcpy(d_in,data,n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice); // std::cout<< divup(n,block_size*len) <<std::endl; // for (long long i=64001; i<65001; i++){ // inter_result[i]=(64000+i-1)*(i-64000)/2; // } prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n); // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for (int i=64001; i<65001; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl; // break; // // return 0; // } // } // std::cout<<"pass here"<<std::endl; CHECK(cudaGetLastError()); long long start=64001; // int end=start+1000; unsigned int end=100000; // std::cout<< end*end<<std::endl; for (unsigned int i=start; i<end; i++){ // int index=i-64000; inter_result[i]=((start-1+i-1)*(i-start+1))/2; } start=1; end=start+64000; for (long long i=start; i<end; i++){ // int index=i-64000; inter_result[i]=(i-1)*(i-start+1)/2; } // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for (int i=65000; i<66000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // break; // // return 0; // } // } // std::cout<<"pass"<<std::endl; // for (unsigned int i=65000; i<66000; i++){ // inter_result[i]=(65000+i)*(i-65000+1)/2; // } // for (long long i=64001; i<65001; i++){ // inter_result[i]=(64000+i-1)*(i-64000)/2; // } // for (unsigned int i=1001; i<2001; i++){ // inter_result[i]=(1000+i-1)*(i-1000)/2; // } // inter_result[2000]+=result[1000]; serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for(int i=1001; i<2001; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } // std::cout<<"pass"<<std::endl; // std::cout << "pass first one"<<std::endl; // for (int i=64000; i<65000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } // inter_result[65999]+=inter_result[64999]; // for (int i=65000; i<66000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl; // break; // // return 0; // } // } mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); //serial sum CHECK(cudaMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=64001; i<100000; i++){ if(inter_result[i]!=inter_sum[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl; break; // return 0; } } std::cout<<"pass here 1"<<std::endl; serialsum_accrossblock(inter_sum, len, n, block_size); CHECK(cudaMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // CHECK(cudaMemcpy(cal_result, 
d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=1; i<100000; i++){ if(inter_result[i]!=inter_sum[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl; break; // return 0; } } std::cout<<"pass here"<<std::endl; // serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for (int i=0; i<100000; i++){ // if(inter_result[i]!=cal_result[i]){ // std::cout<<"i: "<< i <<"error!"<<std::endl; // std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl; // break; // // return 0; // } // } // for (unsigned int i=64000; i<100000; i++){ // inter_result[i]+=inter_result[63999]; // } // std::cout<< divup(n,block_size*len) << std::endl; mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); // cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost); CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_sum)); //compare unsigned int i; for (i=0; i<n; i++){ if(result[i]!=cal_result[i]){ std::cout<<"i: "<< i <<"error!"<<std::endl; std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl; break; } } if(i==n){ std::cout<<"correct"<<std::endl; } return 0; }
65d99ef76c6692060198a0b2b017a71b95e36bcf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

inline __host__ __device__ float dot(float3 a, float3 b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

inline __host__ __device__ float length(float3 v)
{
    return sqrtf(dot(v, v));
}

const float G = 6.6742867e-5f;

__global__ void simGlobalStep(float3* pos, float3* vel, int total)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < total) {
        float3 force = make_float3(0.0f, 0.0f, 0.0f);
        for (int j = 0; j < total; ++j) {
            float3 dlt;
            float sqlen;
            float len;
            if (j == idx) {
                continue;
            }
            dlt.x = pos[j].x - pos[idx].x;
            dlt.y = pos[j].y - pos[idx].y;
            dlt.z = pos[j].z - pos[idx].z;
            sqlen = dot(dlt, dlt);
            len = sqrtf(sqlen);
            dlt.x /= len;
            dlt.y /= len;
            dlt.z /= len;
            sqlen = (sqlen < 1.0f) ? 1.0f : sqlen;
            force.x += dlt.x * G / sqlen;
            force.y += dlt.y * G / sqlen;
            force.z += dlt.z * G / sqlen;
        }
        vel[idx].x += force.x;
        vel[idx].y += force.y;
        vel[idx].z += force.z;
    }
}

void* cudaPosData;
void* cudaVelData;

int simInitialize(int totalSize, void* ipos, void* ivel)
{
    hipError_t error;
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, hipGetErrorString(error), error);
        return -1;
    }
    hipMalloc(&cudaPosData, totalSize*sizeof(float3));
    hipDeviceSynchronize();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, hipGetErrorString(error), error);
        return -1;
    }
    hipMemcpy(cudaPosData, ipos, totalSize*sizeof(float3), hipMemcpyHostToDevice);
    hipMalloc(&cudaVelData, totalSize*sizeof(float3));
    hipDeviceSynchronize();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, hipGetErrorString(error), error);
        return -1;
    }
    hipMemcpy(cudaVelData, ivel, totalSize*sizeof(float3), hipMemcpyHostToDevice);
    return 0;
}

int simStep(void* inPos, void* outVel, int totalSize)
{
    hipError_t error;
    hipMemcpy(cudaPosData, inPos, totalSize*sizeof(float3), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( simGlobalStep), dim3(totalSize/512),dim3(512), 0, 0, (float3*)cudaPosData, (float3*)cudaVelData, totalSize);
    hipMemcpy(outVel, cudaVelData, totalSize*sizeof(float3), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    error = hipGetLastError();
    if (error != hipSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, hipGetErrorString(error), error);
        return -1;
    }
    return 0;
}

int simCleanup()
{
    hipFree(cudaPosData);
    hipFree(cudaVelData);
    return 0;
}
65d99ef76c6692060198a0b2b017a71b95e36bcf.cu
#include <stdio.h>

inline __host__ __device__ float dot(float3 a, float3 b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

inline __host__ __device__ float length(float3 v)
{
    return sqrtf(dot(v, v));
}

const float G = 6.6742867e-5f;

__global__ void simGlobalStep(float3* pos, float3* vel, int total)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < total) {
        float3 force = make_float3(0.0f, 0.0f, 0.0f);
        for (int j = 0; j < total; ++j) {
            float3 dlt;
            float sqlen;
            float len;
            if (j == idx) {
                continue;
            }
            dlt.x = pos[j].x - pos[idx].x;
            dlt.y = pos[j].y - pos[idx].y;
            dlt.z = pos[j].z - pos[idx].z;
            sqlen = dot(dlt, dlt);
            len = sqrtf(sqlen);
            dlt.x /= len;
            dlt.y /= len;
            dlt.z /= len;
            sqlen = (sqlen < 1.0f) ? 1.0f : sqlen;
            force.x += dlt.x * G / sqlen;
            force.y += dlt.y * G / sqlen;
            force.z += dlt.z * G / sqlen;
        }
        vel[idx].x += force.x;
        vel[idx].y += force.y;
        vel[idx].z += force.z;
    }
}

void* cudaPosData;
void* cudaVelData;

int simInitialize(int totalSize, void* ipos, void* ivel)
{
    cudaError_t error;
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMalloc(&cudaPosData, totalSize*sizeof(float3));
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMemcpy(cudaPosData, ipos, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    cudaMalloc(&cudaVelData, totalSize*sizeof(float3));
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMemcpy(cudaVelData, ivel, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    return 0;
}

int simStep(void* inPos, void* outVel, int totalSize)
{
    cudaError_t error;
    cudaMemcpy(cudaPosData, inPos, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    simGlobalStep<<<totalSize/512,512>>>((float3*)cudaPosData, (float3*)cudaVelData, totalSize);
    cudaMemcpy(outVel, cudaVelData, totalSize*sizeof(float3), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    return 0;
}

int simCleanup()
{
    cudaFree(cudaPosData);
    cudaFree(cudaVelData);
    return 0;
}
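simStep above launches with totalSize/512 blocks, which only covers every body when totalSize is a multiple of 512; since simGlobalStep already guards with idx < total, the usual alternative is to round the block count up. The sketch below shows that grid calculation on a hypothetical kernel with the same guard; it is an illustration, not a change to the original pair of files.

#include <cuda_runtime.h>

// Hypothetical kernel with the same bounds guard as simGlobalStep.
__global__ void touch(float3* v, int total)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < total) v[idx].x += 1.0f;
}

int main()
{
    const int totalSize = 1000;   // deliberately not a multiple of 512
    float3* d_v = nullptr;
    cudaMalloc((void**)&d_v, totalSize * sizeof(float3));
    cudaMemset(d_v, 0, totalSize * sizeof(float3));

    const int block = 512;
    const int grid = (totalSize + block - 1) / block;  // round up so the tail is covered
    touch<<<grid, block>>>(d_v, totalSize);

    cudaDeviceSynchronize();
    cudaFree(d_v);
    return 0;
}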
e222be080538413c6f088af39cfcc12cd40288bf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* SgemmEx: f16.f16->f16 f16.f16->f32 i8.i8->f32 f32.f32->f32 *CgemmEx: ci8.ci8->cf32 [>= sm_50] cf32.cf32->cf32 [>= sm_50] DgemmEx: f64.f64->f64 ZgemmEx: cf64.cf64->cf64 *Cgemm3m: cf32.cf32->cf32 (Gauss) [>= sm_50] Zgemm3m: cf64.cf64->cf64 (Gauss) [>= sm_50] Cherk: cf32->cf32 CherkEx: ci8->cf32 [>= sm_50] cf32->cf32 [>= sm_50] *Cherk3mEx: ci8->cf32 (Gauss) [>= sm_50] cf32->cf32 (Gauss) [>= sm_50] # TODO: Start with: Cgemm (+preconvert to fp32) CgemmEx (8bit, cuda >= 8.0, >=sm_50) Cgemm3m (fp32, cuda >= 8.0, >=sm_50) Cherk (+preconvert to fp32) Cherk3mEx (8bit or fp32, cuda >= 8.0, >=sm_50) The preconvert paths should support ci4, ci8, ci16, fp16 The other paths should only be used if the dtype already matches Eventually it will probably be worth integrating the xGPU kernel, given the lack of cublasHerkEx (particularly the small-N problem). 
*/ #include <bifrost/linalg.h> #include "linalg_kernels.h" #include "assert.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "ShapeIndexer.cuh" #include "trace.hpp" class BFlinalg_impl { hipblasHandle_t _cublas; // No copy-assign BFlinalg_impl(BFlinalg_impl const& ); BFlinalg_impl& operator=(BFlinalg_impl const& ); public: BFlinalg_impl() { BF_CHECK_CUBLAS_EXCEPTION(hipblasCreate(&_cublas)); } ~BFlinalg_impl() { if( _cublas ) { hipblasDestroy(_cublas); } } hipblasHandle_t cublas() const { return _cublas; } }; BFstatus bfMatMul_aa_exec(BFlinalg handle, hipStream_t stream, hipblasOperation_t trans, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, double beta, void const* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(hipblasSetStream(handle->cublas(), stream)); // Note: UPPER here means lower for row-major ordering hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; BF_CHECK_CUBLAS(hipblasSetPointerMode(handle->cublas(), HIPBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(hipblasSsyrk(handle->cublas(), uplo, trans, n, k, &alpha_f, (float*)a_data, a_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(hipblasDsyrk(handle->cublas(), uplo, trans, n, k, &alpha, (double*)a_data, a_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(hipblasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, HIP_C_8I, a_stride, &beta_f, (hipComplex*)c_data, HIP_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(hipblasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, HIP_C_32F, a_stride, &beta_f, (hipComplex*)c_data, HIP_C_32F, c_stride)); break; } #endif BF_CHECK_CUBLAS(hipblasCherk(handle->cublas(), uplo, trans, n, k, &alpha_f, (hipComplex*)a_data, a_stride, &beta_f, (hipComplex*)c_data, c_stride)); break; } case BF_DTYPE_CF64: { BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(hipblasZherk(handle->cublas(), uplo, trans, n, k, &alpha, (hipDoubleComplex*)a_data, a_stride, &beta, (hipDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa(BFlinalg handle, double alpha, BFarray const* a, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; // Convert byte strides to element strides int shape[BF_MAX_DIMS]; int astrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { shape[d] = a->shape[d]; astrides[d] = a->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { 
BF_ASSERT(astrides[d] % BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Determine transposition based on strides, and update strides and shape hipblasOperation_t trans; if( astrides[ndim-1] < astrides[ndim-2] ) { BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); trans = (BF_DTYPE_IS_COMPLEX(a->dtype) ? HIPBLAS_OP_C : HIPBLAS_OP_T); } else if( astrides[ndim-1] > astrides[ndim-2] ) { BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-1], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-1], BF_STATUS_INVALID_SHAPE); trans = HIPBLAS_OP_N; std::swap(astrides[ndim-1], astrides[ndim-2]); std::swap( shape[ndim-1], shape[ndim-2]); } else { BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } ShapeIndexer<BF_MAX_DIMS> shape_indexer(shape, ndim-2); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_aa_exec(handle, stream, trans, shape[ndim-2], shape[ndim-1], alpha, a_data, a->dtype, astrides[ndim-2], beta, c_data, c->dtype, cstrides[ndim-2]) ); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab(BFlinalg handle, double alpha, BFarray const* a, BFarray const* b, double beta, BFarray const* c) { // **TODO: Implement this! BF_FAIL("Implemented", BF_STATUS_UNSUPPORTED); } BFstatus bfLinAlgCreate(BFlinalg* handle_ptr) { BF_TRACE(); BF_ASSERT(handle_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*handle_ptr = new BFlinalg_impl(), *handle_ptr = 0); } BFstatus bfLinAlgDestroy(BFlinalg handle) { BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); delete handle; return BF_STATUS_SUCCESS; } // Computes c = a.b or a.a^H if b is NULL BFstatus bfLinAlgMatMul(BFlinalg handle, double alpha, BFarray const* a, // [...,i,j] BFarray const* b, // [...,j,k] double beta, BFarray const* c) { // [...,i,k] // TODO: Use weight_and_sum kernel when: // Dim i is the fastest dim of a // Dim j is the fastest dim of b // Dim k is NOT the fastest dim of c // [Dim k is small (say < 64)] // TODO: Generalise weight_and_sum kernel to arbitrary strides and dtypes // For dtypes, need Complex<T> to work for vectorized loads // UNLESS, we use something like storage_type<T>::type BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); BF_ASSERT(a, BF_STATUS_INVALID_POINTER); BF_ASSERT(c, BF_STATUS_INVALID_POINTER); if( b ) { return bfMatMul_ab(handle, alpha, a, b, beta, c); } else { return bfMatMul_aa(handle, alpha, a, beta, c); } }
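For reference, the cherk/syrk family dispatched in bfMatMul_aa_exec above performs a (Hermitian) rank-k update,

    C := alpha * op(A) * op(A)^H + beta * C

writing only one triangle of C, since the result is Hermitian. cuBLAS assumes column-major storage while Bifrost arrays are row-major; a row-major matrix is the transpose of the same buffer read column-major, so requesting the UPPER fill mode on the column-major view populates the lower triangle of the row-major result, which is what the "UPPER here means lower for row-major ordering" comment refers to. The op() flag is then chosen from the element strides of a, with OP_C used for complex dtypes so the conjugate transpose is taken.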
e222be080538413c6f088af39cfcc12cd40288bf.cu
/* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* SgemmEx: f16.f16->f16 f16.f16->f32 i8.i8->f32 f32.f32->f32 *CgemmEx: ci8.ci8->cf32 [>= sm_50] cf32.cf32->cf32 [>= sm_50] DgemmEx: f64.f64->f64 ZgemmEx: cf64.cf64->cf64 *Cgemm3m: cf32.cf32->cf32 (Gauss) [>= sm_50] Zgemm3m: cf64.cf64->cf64 (Gauss) [>= sm_50] Cherk: cf32->cf32 CherkEx: ci8->cf32 [>= sm_50] cf32->cf32 [>= sm_50] *Cherk3mEx: ci8->cf32 (Gauss) [>= sm_50] cf32->cf32 (Gauss) [>= sm_50] # TODO: Start with: Cgemm (+preconvert to fp32) CgemmEx (8bit, cuda >= 8.0, >=sm_50) Cgemm3m (fp32, cuda >= 8.0, >=sm_50) Cherk (+preconvert to fp32) Cherk3mEx (8bit or fp32, cuda >= 8.0, >=sm_50) The preconvert paths should support ci4, ci8, ci16, fp16 The other paths should only be used if the dtype already matches Eventually it will probably be worth integrating the xGPU kernel, given the lack of cublasHerkEx (particularly the small-N problem). 
*/ #include <bifrost/linalg.h> #include "linalg_kernels.h" #include "assert.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "ShapeIndexer.cuh" #include "trace.hpp" class BFlinalg_impl { cublasHandle_t _cublas; // No copy-assign BFlinalg_impl(BFlinalg_impl const& ); BFlinalg_impl& operator=(BFlinalg_impl const& ); public: BFlinalg_impl() { BF_CHECK_CUBLAS_EXCEPTION(cublasCreate(&_cublas)); } ~BFlinalg_impl() { if( _cublas ) { cublasDestroy(_cublas); } } cublasHandle_t cublas() const { return _cublas; } }; BFstatus bfMatMul_aa_exec(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, double beta, void const* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(cublasSetStream(handle->cublas(), stream)); // Note: UPPER here means lower for row-major ordering cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; BF_CHECK_CUBLAS(cublasSetPointerMode(handle->cublas(), CUBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(cublasSsyrk(handle->cublas(), uplo, trans, n, k, &alpha_f, (float*)a_data, a_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasDsyrk(handle->cublas(), uplo, trans, n, k, &alpha, (double*)a_data, a_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_8I, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_32F, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } #endif BF_CHECK_CUBLAS(cublasCherk(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, a_stride, &beta_f, (cuComplex*)c_data, c_stride)); break; } case BF_DTYPE_CF64: { BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasZherk(handle->cublas(), uplo, trans, n, k, &alpha, (cuDoubleComplex*)a_data, a_stride, &beta, (cuDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa(BFlinalg handle, double alpha, BFarray const* a, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; // Convert byte strides to element strides int shape[BF_MAX_DIMS]; int astrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { shape[d] = a->shape[d]; astrides[d] = a->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { BF_ASSERT(astrides[d] % 
BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Determine transposition based on strides, and update strides and shape cublasOperation_t trans; if( astrides[ndim-1] < astrides[ndim-2] ) { BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); trans = (BF_DTYPE_IS_COMPLEX(a->dtype) ? CUBLAS_OP_C : CUBLAS_OP_T); } else if( astrides[ndim-1] > astrides[ndim-2] ) { BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-1], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-1], BF_STATUS_INVALID_SHAPE); trans = CUBLAS_OP_N; std::swap(astrides[ndim-1], astrides[ndim-2]); std::swap( shape[ndim-1], shape[ndim-2]); } else { BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } ShapeIndexer<BF_MAX_DIMS> shape_indexer(shape, ndim-2); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_aa_exec(handle, stream, trans, shape[ndim-2], shape[ndim-1], alpha, a_data, a->dtype, astrides[ndim-2], beta, c_data, c->dtype, cstrides[ndim-2]) ); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab(BFlinalg handle, double alpha, BFarray const* a, BFarray const* b, double beta, BFarray const* c) { // **TODO: Implement this! BF_FAIL("Implemented", BF_STATUS_UNSUPPORTED); } BFstatus bfLinAlgCreate(BFlinalg* handle_ptr) { BF_TRACE(); BF_ASSERT(handle_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*handle_ptr = new BFlinalg_impl(), *handle_ptr = 0); } BFstatus bfLinAlgDestroy(BFlinalg handle) { BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); delete handle; return BF_STATUS_SUCCESS; } // Computes c = a.b or a.a^H if b is NULL BFstatus bfLinAlgMatMul(BFlinalg handle, double alpha, BFarray const* a, // [...,i,j] BFarray const* b, // [...,j,k] double beta, BFarray const* c) { // [...,i,k] // TODO: Use weight_and_sum kernel when: // Dim i is the fastest dim of a // Dim j is the fastest dim of b // Dim k is NOT the fastest dim of c // [Dim k is small (say < 64)] // TODO: Generalise weight_and_sum kernel to arbitrary strides and dtypes // For dtypes, need Complex<T> to work for vectorized loads // UNLESS, we use something like storage_type<T>::type BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); BF_ASSERT(a, BF_STATUS_INVALID_POINTER); BF_ASSERT(c, BF_STATUS_INVALID_POINTER); if( b ) { return bfMatMul_ab(handle, alpha, a, b, beta, c); } else { return bfMatMul_aa(handle, alpha, a, beta, c); } }
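For readers who want to exercise the plain cublasCherk path above in isolation, here is a minimal, self-contained sketch (not Bifrost code; the matrix sizes and the zero-filled input are placeholder assumptions) that computes C = A * A^H for one column-major n x k single-precision complex matrix:

// Minimal standalone sketch of the CF32 herk path: C = A * A^H.
#include <cublas_v2.h>
#include <cuComplex.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int n = 4, k = 8;                        // C is n x n, A is n x k
    cuComplex *dA = 0, *dC = 0;
    cudaMalloc(&dA, n * k * sizeof(cuComplex));
    cudaMalloc(&dC, n * n * sizeof(cuComplex));
    cudaMemset(dA, 0, n * k * sizeof(cuComplex));  // stand-in for real input data
    cudaMemset(dC, 0, n * n * sizeof(cuComplex));

    cublasHandle_t handle;
    cublasCreate(&handle);
    float alpha = 1.0f, beta = 0.0f;               // herk scalars are real-valued
    // With CUBLAS_OP_N: C(n x n) = alpha * A(n x k) * A^H + beta * C; only the
    // CUBLAS_FILL_MODE_UPPER triangle of the column-major C is written.
    cublasCherk(handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N,
                n, k, &alpha, dA, n, &beta, dC, n);
    cudaDeviceSynchronize();
    printf("cherk: %s\n", cudaGetErrorString(cudaGetLastError()));

    cublasDestroy(handle);
    cudaFree(dA);
    cudaFree(dC);
    return 0;
}

Build with nvcc and link against -lcublas; in the Bifrost code above the equivalent call is issued once per batch index, with the per-batch pointers supplied by ShapeIndexer and array_get_pointer.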
27e10a993cff655bd504faeb30c1f1793315ea19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> /// /// @file rdwt53.cu /// @brief CUDA implementation of reverse 5/3 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-02-04 14:19 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps shared momory buffer and algorithms needed for computing 5/3 RDWT /// using sliding window and lifting schema. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class RDWT53 { private: /// Shared memory buffer used for 5/3 DWT transforms. typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer; /// Shared buffer used for reverse 5/3 DWT. RDWT53Buffer buffer; /// Difference between indices of two vertically neighboring items in buffer. enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE }; /// Info needed for loading of one input column from input image. /// @tparam CHECKED true if loader should check boundaries template <bool CHECKED> struct RDWT53Column { /// loader of pixels from column in input image VerticalDWTBandLoader<int, CHECKED> loader; /// Offset of corresponding column in shared buffer. int offset; /// Sets all fields to some values to avoid 'uninitialized' warnings. __device__ void clear() { offset = 0; loader.clear(); } }; /// 5/3 DWT reverse update operation. struct Reverse53Update { __device__ void operator() (const int p, int & c, const int n) const { c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft } }; /// 5/3 DWT reverse predict operation. struct Reverse53Predict { __device__ void operator() (const int p, int & c, const int n) const { c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft } }; /// Horizontal 5/3 RDWT on specified lines of transform buffer. 
/// @param lines number of lines to be transformed /// @param firstLine index of the first line to be transformed __device__ void horizontalTransform(const int lines, const int firstLine) { __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update()); __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict()); __syncthreads(); } /// Using given loader, it loads another WIN_SIZE_Y coefficients /// into specified column. /// @tparam CHECKED true if loader should check image boundaries /// @param input input coefficients to load from /// @param col info about loaded column template <bool CHECKED> inline __device__ void loadWindowIntoColumn(const int * const input, RDWT53Column<CHECKED> & col) { for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) { buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input); buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input); } } /// Initializes one column of shared transform buffer with 7 input pixels. /// Those 7 pixels will not be transformed. Also initializes given loader. /// @tparam CHECKED true if loader should check image boundaries /// @param columnX x coordinate of column in shared transform buffer /// @param input input image /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param loader (uninitialized) info about loaded column template <bool CHECKED> __device__ void initColumn(const int columnX, const int * const input, const int sizeX, const int sizeY, RDWT53Column<CHECKED> & column, const int firstY) { // coordinates of the first coefficient to be loaded const int firstX = blockIdx.x * WIN_SIZE_X + columnX; // offset of the column with index 'colIndex' in the transform buffer column.offset = buffer.getColumnOffset(columnX); if(blockIdx.y == 0) { // topmost block - apply mirroring rules when loading first 3 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input); buffer[column.offset + 0 * STRIDE] = buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 1); buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input); buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input); buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input); } // Now, the next coefficient, which will be loaded by loader, is #2. } /// Actual GPU 5/3 RDWT implementation. /// @tparam CHECKED_LOADS true if boundaries must be checked when reading /// @tparam CHECKED_WRITES true if boundaries must be checked when writing /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps template<bool CHECKED_LOADS, bool CHECKED_WRITES> __device__ void transform(const int * const in, int * const out, const int sizeX, const int sizeY, const int winSteps) { // info about one main and one boundary column RDWT53Column<CHECKED_LOADS> column, boundaryColumn; // index of first row to be transformed const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps; // some threads initialize boundary columns boundaryColumn.clear(); if(threadIdx.x < 3) { // First 3 threads also handle boundary columns. 
Thread #0 gets right // column #0, thread #1 get right column #1 and thread #2 left column. const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3); // Thread initializes offset of the boundary column (in shared // buffer), first 3 pixels of the column and a loader for this column. initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY); } // All threads initialize central columns. initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY); // horizontally transform first 3 rows horizontalTransform(3, 0); // writer of output pixels - initialize it const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x; VerticalDWTPixelWriter<int, CHECKED_WRITES> writer; writer.init(sizeX, sizeY, outX, firstY); // offset of column (in transform buffer) saved by this thread const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x); // (Each iteration assumes that first 3 rows of transform buffer are // already loaded with horizontally transformed pixels.) for(int w = 0; w < winSteps; w++) { // Load another WIN_SIZE_Y lines of this thread's column // into the transform buffer. loadWindowIntoColumn(in, column); // possibly load boundary columns if(threadIdx.x < 3) { loadWindowIntoColumn(in, boundaryColumn); } // horizontally transform all newly loaded lines horizontalTransform(WIN_SIZE_Y, 3); // Using 3 registers, remember current values of last 3 rows // of transform buffer. These rows are transformed horizontally // only and will be used in next iteration. int last3Lines[3]; last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE]; last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE]; last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE]; // vertically transform all central columns buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update()); buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict()); // Save all results of current window. Results are in transform buffer // at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now. // (They only served as a boundary for vertical RDWT.) for(int i = 1; i < (1 + WIN_SIZE_Y); i++) { writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]); } // Use last 3 remembered lines as first 3 lines for next iteration. // As expected, these lines are already horizontally transformed. buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0]; buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1]; buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2]; // Wait for all writing threads before proceeding to loading new // coeficients in next iteration. (Not to overwrite those which // are not written yet.) __syncthreads(); } } public: /// Main GPU 5/3 RDWT entry point. /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps __device__ static void run(const int * const input, int * const output, const int sx, const int sy, const int steps) { // prepare instance with buffer in shared memory __shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (1 in next expressions is for radius of impulse response of 5/3 RDWT.) 
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1; const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading rdwt53.transform<true, true>(input, output, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only rdwt53.transform<false, true>(input, output, sx, sy, steps); } else { // no nearby boundary => check nothing rdwt53.transform<false, false>(input, output, sx, sy, steps); } } }; // end of class RDWT53 /// Main GPU 5/3 RDWT entry point. /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8)) __global__ void rdwt53Kernel(const int * const in, int * const out, const int sx, const int sy, const int steps) { RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 5/3 RDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // finally transform this level PERF_BEGIN allocateReadWriteSets(gSize, WIN_SX); hipLaunchKernelGGL(( rdwt53Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps); freeReadWriteSets(gSize, WIN_SX); PERF_END(" RDWT53", sx, sy) CudaDWTTester::checkLastKernelCall("RDWT 5/3 kernel"); } /// Reverse 5/3 2D DWT. See common rules (above) for more details. /// @param in Input DWT coefficients. Format described in common rules. /// Will not be preserved (will be overwritten). /// @param out output buffer on GPU - will contain original image /// in normalized range [-128, 127]. /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) { if(levels > 1) { // let this function recursively reverse transform deeper levels first const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); rdwt53(in, out, llSizeX, llSizeY, levels - 1); // copy reverse transformed LL band from output back into the input memCopy(in, out, llSizeX, llSizeY); } // select right width of kernel for the size of the image if(sizeX >= 960) { launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY); } else { launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY); } } } // end of namespace dwt_cuda
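The two functors above implement the inverse 5/3 lifting steps of ITU-T Rec. T.800 (JPEG 2000): even (lowpass) samples are first un-updated, x[2i] = y[2i] - (y[2i-1] + y[2i+1] + 2)/4, and odd (highpass) samples are then un-predicted, x[2i+1] = y[2i+1] + (x[2i] + x[2i+2])/2. A small host-side sketch of the same two passes on a 1-D interleaved array follows (made-up sample values; boundary handling is simplified to clamping rather than the mirrored extension the kernel's loaders implement; integer division mirrors the operators above):

// 1-D sanity sketch of Reverse53Update followed by Reverse53Predict.
#include <cstdio>

static int at(const int *y, int i, int n) {        // clamped access at the edges
    return y[i < 0 ? 0 : (i >= n ? n - 1 : i)];
}

int main() {
    int y[8] = { 10, 1, 12, -2, 9, 0, 11, 3 };     // even = lowpass, odd = highpass
    const int n = 8;
    for (int i = 0; i < n; i += 2)                 // F.3: x[2i] = y[2i] - (H[i-1]+H[i]+2)/4
        y[i] -= (at(y, i - 1, n) + at(y, i + 1, n) + 2) / 4;
    for (int i = 1; i < n; i += 2)                 // F.4: x[2i+1] = y[2i+1] + (x[2i]+x[2i+2])/2
        y[i] += (at(y, i - 1, n) + at(y, i + 1, n)) / 2;
    for (int i = 0; i < n; i++) printf("%d ", y[i]);
    printf("\n");
    return 0;
}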
27e10a993cff655bd504faeb30c1f1793315ea19.cu
#include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> /// /// @file rdwt53.cu /// @brief CUDA implementation of reverse 5/3 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-02-04 14:19 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps shared momory buffer and algorithms needed for computing 5/3 RDWT /// using sliding window and lifting schema. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class RDWT53 { private: /// Shared memory buffer used for 5/3 DWT transforms. typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> RDWT53Buffer; /// Shared buffer used for reverse 5/3 DWT. RDWT53Buffer buffer; /// Difference between indices of two vertically neighboring items in buffer. enum { STRIDE = RDWT53Buffer::VERTICAL_STRIDE }; /// Info needed for loading of one input column from input image. /// @tparam CHECKED true if loader should check boundaries template <bool CHECKED> struct RDWT53Column { /// loader of pixels from column in input image VerticalDWTBandLoader<int, CHECKED> loader; /// Offset of corresponding column in shared buffer. int offset; /// Sets all fields to some values to avoid 'uninitialized' warnings. __device__ void clear() { offset = 0; loader.clear(); } }; /// 5/3 DWT reverse update operation. struct Reverse53Update { __device__ void operator() (const int p, int & c, const int n) const { c -= (p + n + 2) / 4; // F.3, page 118, ITU-T Rec. T.800 final draft } }; /// 5/3 DWT reverse predict operation. struct Reverse53Predict { __device__ void operator() (const int p, int & c, const int n) const { c += (p + n) / 2; // F.4, page 118, ITU-T Rec. T.800 final draft } }; /// Horizontal 5/3 RDWT on specified lines of transform buffer. 
/// @param lines number of lines to be transformed /// @param firstLine index of the first line to be transformed __device__ void horizontalTransform(const int lines, const int firstLine) { __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, Reverse53Update()); __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, Reverse53Predict()); __syncthreads(); } /// Using given loader, it loads another WIN_SIZE_Y coefficients /// into specified column. /// @tparam CHECKED true if loader should check image boundaries /// @param input input coefficients to load from /// @param col info about loaded column template <bool CHECKED> inline __device__ void loadWindowIntoColumn(const int * const input, RDWT53Column<CHECKED> & col) { for(int i = 3; i < (3 + WIN_SIZE_Y); i += 2) { buffer[col.offset + i * STRIDE] = col.loader.loadLowFrom(input); buffer[col.offset + (i + 1) * STRIDE] = col.loader.loadHighFrom(input); } } /// Initializes one column of shared transform buffer with 7 input pixels. /// Those 7 pixels will not be transformed. Also initializes given loader. /// @tparam CHECKED true if loader should check image boundaries /// @param columnX x coordinate of column in shared transform buffer /// @param input input image /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param loader (uninitialized) info about loaded column template <bool CHECKED> __device__ void initColumn(const int columnX, const int * const input, const int sizeX, const int sizeY, RDWT53Column<CHECKED> & column, const int firstY) { // coordinates of the first coefficient to be loaded const int firstX = blockIdx.x * WIN_SIZE_X + columnX; // offset of the column with index 'colIndex' in the transform buffer column.offset = buffer.getColumnOffset(columnX); if(blockIdx.y == 0) { // topmost block - apply mirroring rules when loading first 3 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input); buffer[column.offset + 0 * STRIDE] = buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 1); buffer[column.offset + 0 * STRIDE] = column.loader.loadHighFrom(input); buffer[column.offset + 1 * STRIDE] = column.loader.loadLowFrom(input); buffer[column.offset + 2 * STRIDE] = column.loader.loadHighFrom(input); } // Now, the next coefficient, which will be loaded by loader, is #2. } /// Actual GPU 5/3 RDWT implementation. /// @tparam CHECKED_LOADS true if boundaries must be checked when reading /// @tparam CHECKED_WRITES true if boundaries must be checked when writing /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps template<bool CHECKED_LOADS, bool CHECKED_WRITES> __device__ void transform(const int * const in, int * const out, const int sizeX, const int sizeY, const int winSteps) { // info about one main and one boundary column RDWT53Column<CHECKED_LOADS> column, boundaryColumn; // index of first row to be transformed const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps; // some threads initialize boundary columns boundaryColumn.clear(); if(threadIdx.x < 3) { // First 3 threads also handle boundary columns. 
Thread #0 gets right // column #0, thread #1 get right column #1 and thread #2 left column. const int colId = threadIdx.x + ((threadIdx.x != 2) ? WIN_SIZE_X : -3); // Thread initializes offset of the boundary column (in shared // buffer), first 3 pixels of the column and a loader for this column. initColumn(colId, in, sizeX, sizeY, boundaryColumn, firstY); } // All threads initialize central columns. initColumn(parityIdx<WIN_SIZE_X>(), in, sizeX, sizeY, column, firstY); // horizontally transform first 3 rows horizontalTransform(3, 0); // writer of output pixels - initialize it const int outX = blockIdx.x * WIN_SIZE_X + threadIdx.x; VerticalDWTPixelWriter<int, CHECKED_WRITES> writer; writer.init(sizeX, sizeY, outX, firstY); // offset of column (in transform buffer) saved by this thread const int outputColumnOffset = buffer.getColumnOffset(threadIdx.x); // (Each iteration assumes that first 3 rows of transform buffer are // already loaded with horizontally transformed pixels.) for(int w = 0; w < winSteps; w++) { // Load another WIN_SIZE_Y lines of this thread's column // into the transform buffer. loadWindowIntoColumn(in, column); // possibly load boundary columns if(threadIdx.x < 3) { loadWindowIntoColumn(in, boundaryColumn); } // horizontally transform all newly loaded lines horizontalTransform(WIN_SIZE_Y, 3); // Using 3 registers, remember current values of last 3 rows // of transform buffer. These rows are transformed horizontally // only and will be used in next iteration. int last3Lines[3]; last3Lines[0] = buffer[outputColumnOffset + (WIN_SIZE_Y + 0) * STRIDE]; last3Lines[1] = buffer[outputColumnOffset + (WIN_SIZE_Y + 1) * STRIDE]; last3Lines[2] = buffer[outputColumnOffset + (WIN_SIZE_Y + 2) * STRIDE]; // vertically transform all central columns buffer.forEachVerticalOdd(outputColumnOffset, Reverse53Update()); buffer.forEachVerticalEven(outputColumnOffset, Reverse53Predict()); // Save all results of current window. Results are in transform buffer // at rows from #1 to #(1 + WIN_SIZE_Y). Other rows are invalid now. // (They only served as a boundary for vertical RDWT.) for(int i = 1; i < (1 + WIN_SIZE_Y); i++) { writer.writeInto(out, buffer[outputColumnOffset + i * STRIDE]); } // Use last 3 remembered lines as first 3 lines for next iteration. // As expected, these lines are already horizontally transformed. buffer[outputColumnOffset + 0 * STRIDE] = last3Lines[0]; buffer[outputColumnOffset + 1 * STRIDE] = last3Lines[1]; buffer[outputColumnOffset + 2 * STRIDE] = last3Lines[2]; // Wait for all writing threads before proceeding to loading new // coeficients in next iteration. (Not to overwrite those which // are not written yet.) __syncthreads(); } } public: /// Main GPU 5/3 RDWT entry point. /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps __device__ static void run(const int * const input, int * const output, const int sx, const int sy, const int steps) { // prepare instance with buffer in shared memory __shared__ RDWT53<WIN_SIZE_X, WIN_SIZE_Y> rdwt53; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (1 in next expressions is for radius of impulse response of 5/3 RDWT.) 
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1; const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading rdwt53.transform<true, true>(input, output, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only rdwt53.transform<false, true>(input, output, sx, sy, steps); } else { // no nearby boundary => check nothing rdwt53.transform<false, false>(input, output, sx, sy, steps); } } }; // end of class RDWT53 /// Main GPU 5/3 RDWT entry point. /// @param in input image (5/3 transformed coefficients) /// @param out output buffer (for reverse transformed image) /// @param sizeX width of the output image /// @param sizeY height of the output image /// @param winSteps number of sliding window steps template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(RDWT53<WIN_SX, WIN_SY>), 8)) __global__ void rdwt53Kernel(const int * const in, int * const out, const int sx, const int sy, const int steps) { RDWT53<WIN_SX, WIN_SY>::run(in, out, sx, sy, steps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 5/3 RDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchRDWT53Kernel (int * in, int * out, const int sx, const int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // finally transform this level PERF_BEGIN allocateReadWriteSets(gSize, WIN_SX); rdwt53Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps); freeReadWriteSets(gSize, WIN_SX); PERF_END(" RDWT53", sx, sy) CudaDWTTester::checkLastKernelCall("RDWT 5/3 kernel"); } /// Reverse 5/3 2D DWT. See common rules (above) for more details. /// @param in Input DWT coefficients. Format described in common rules. /// Will not be preserved (will be overwritten). /// @param out output buffer on GPU - will contain original image /// in normalized range [-128, 127]. /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void rdwt53(int * in, int * out, int sizeX, int sizeY, int levels) { if(levels > 1) { // let this function recursively reverse transform deeper levels first const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); rdwt53(in, out, llSizeX, llSizeY, levels - 1); // copy reverse transformed LL band from output back into the input memCopy(in, out, llSizeX, llSizeY); } // select right width of kernel for the size of the image if(sizeX >= 960) { launchRDWT53Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchRDWT53Kernel<128, 8>(in, out, sizeX, sizeY); } else { launchRDWT53Kernel<64, 8>(in, out, sizeX, sizeY); } } } // end of namespace dwt_cuda
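A worked example of the launch geometry computed in launchRDWT53Kernel above (the image size is illustrative, not from the source): for a 1920 x 1080 coefficient plane the sizeX >= 960 branch selects the 192 x 8 sliding window, so

    steps = divRndUp(1080, 15 * 8)                        = 9
    gSize = ( divRndUp(1920, 192), divRndUp(1080, 8 * 9) ) = (10, 15)

which launches 150 blocks of 192 threads, each sliding its 192 x 8 window 9 times to cover a 192 x 72 tile. Only the blocks whose tile reaches the right or bottom edge of the image take the bounds-checked template instantiations dispatched in RDWT53::run; interior blocks run the unchecked <false, false> path.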
790a10515eb8f6798cd5b3eb00b1d70f19ba4f06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hylva(hipComplex z) { hipComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i))); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo 
* r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ hipComplex out(0.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); hipComplex morra(-1.0,0.0); hipComplex tla(1.0,0.0); hipComplex vnn(0.0,0.0); hipComplex fou(4.0,0.0); hipComplex tw(2.0,0.0); hipComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ hipComplex qexp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ hipComplex qExp(hipComplex z, hipComplex q) { hipComplex mone(-1.0,0.0); hipComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ hipComplex sinq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ hipComplex cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ hipComplex Sinq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ hipComplex Cosq(hipComplex z, hipComplex q) { hipComplex aie(0.0,1.0); hipComplex out(0.0,0.0); hipComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } __device__ hipComplex asins(hipComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? 
// hipComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ hipComplex reis(hipComplex s, hipComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series hipComplex out(0.0,0.0); hipComplex hav(0.5,0.0); hipComplex xu=out; hipComplex yu=out; yu.r = z.i; int m,n; hipComplex ema=out; hipComplex ena=out; hipComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ hipComplex thu3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex trev(hipComplex lav, hipComplex mel, hipComplex rel) { hipComplex out(0.0,0.0); hipComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<3;v++) { lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav); out = out + mel*(cosc(lav)-powc(V,mel)); } return out; } __device__ hipComplex polylog(hipComplex z, hipComplex s) { hipComplex out(0.0,0.0); hipComplex oom(1.0,0.0); hipComplex flag=oom; int v; for(v=0;v<30;v++) { flag = flag + oom; out = out + powc(z,flag)/powc(flag,s); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 4.1; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(1.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(0.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex 
spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex gren(2.0,0.0); hipComplex next=flurn; hipComplex current = cue; hipComplex xnext = flurn; hipComplex xcurrent = cue; hipComplex rue=cue; hipComplex tinny(.0001,0.0001); hipComplex raga(0.5,27.0); hipComplex ruga(0.5,54.0); hipComplex senna(0.5,0.0); hipComplex finch(0.001,.001); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ //tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult for(v=1;v<30;v++) { cue = cue - expc((hylva(cue)-aon)/(hylva(cue)-uon)); } /*cue = accume;*/ /*if(norg(q)>1.0)*/ /*{*/ /* d_out[i].x = 0;*/ /* d_out[i].y = 0;*/ /* d_out[i].z = 0;*/ /* */ /* d_out[i].w = 255;*/ /**/ /*}*/ /*else*/ { double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
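// --- Editor's sketch (not part of the original hipified file) --------------
// The phase-portrait comment inside distanceKernel describes turning the
// slope field of the autonomous system  x' = x - y,  y' = 1 - x^2  into a
// unit complex number via dy/dx = (dy/dt)/(dx/dt). A minimal device-side
// sketch of that idea follows; the function name is hypothetical and nothing
// in the kernel calls it.
__device__ hipComplex directionField(hipComplex p)
{
    float dx = p.r - p.i;           // dx/dt = x - y, with p = (x, y)
    float dy = 1.0f - p.r * p.r;    // dy/dt = 1 - x^2
    float len = sqrtf(dx * dx + dy * dy);
    if (len == 0.0f)
    {
        // fixed point of the system: the direction is undefined, pick one
        return hipComplex(1.0f, 0.0f);
    }
    // keep only the direction (unit modulus), as the comment suggests
    return hipComplex(dx / len, dy / len);
}
// Coloring a pixel by the argument of directionField(q) would draw the phase
// portrait linked in the comment, in the same way distanceKernel colors the
// argument of cue.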
790a10515eb8f6798cd5b3eb00b1d70f19ba4f06.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hylva(cuComplex z) { cuComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i))); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<20;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*sins(tw*z*run)/(run-roo); } return fou*out; } __device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z) { /* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are derivatives with respect to z or q, we'll see */ cuComplex out(0.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); cuComplex 
morra(-1.0,0.0); cuComplex tla(1.0,0.0); cuComplex vnn(0.0,0.0); cuComplex fou(4.0,0.0); cuComplex tw(2.0,0.0); cuComplex run(1.0,0.0); int v; for(v=0;v<10;v++) { qoo = qoo * q; roo = roo * r * r; tla = tla * morra; vnn = vnn + run; out = out + morra*qoo*the3(tw*z*run,r)/(run-roo); } return fou*out; } // * small q-exponential __device__ cuComplex qexp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return une/qpoch(z,q); } //* large q exponential is just qpoch(-z,q) __device__ cuComplex qExp(cuComplex z, cuComplex q) { cuComplex mone(-1.0,0.0); cuComplex une(1.0,0.0); return qpoch(mone*z,q); } __device__ cuComplex sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) -qexp(z*aie,q))/doo; return out; } __device__ cuComplex cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qexp(z*aie,q) +qexp(z*aie,q))/doo; return out; } __device__ cuComplex Sinq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) -qExp(z*aie,q))/doo; return out; } __device__ cuComplex Cosq(cuComplex z, cuComplex q) { cuComplex aie(0.0,1.0); cuComplex out(0.0,0.0); cuComplex doo(2.0,0.0); out = (qExp(z*aie,q) +qExp(z*aie,q))/doo; return out; } __device__ cuComplex asins(cuComplex z) { float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i)); float fla = z.i/abs(z.i); // *signum, but without a comparison, probably a saner way to do this? // cuComplex out(0.0,0.0); out.r = asinf(bet); out.i = fla * logf(alp + sqrtf(alp*alp-1)); return out; } __device__ int gcd(int a, int b) { int remainder = a % b; if (remainder == 0) { return b; } return gcd(b, remainder); } /* Real Analytic Eisenstein Series */ __device__ cuComplex reis(cuComplex s, cuComplex z) { // see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series cuComplex out(0.0,0.0); cuComplex hav(0.5,0.0); cuComplex xu=out; cuComplex yu=out; yu.r = z.i; int m,n; cuComplex ema=out; cuComplex ena=out; cuComplex den=out; for(m=-20;m<20;m++) { for(n=-20;n<20;n++) { if((m!=0)&&(n!=0)) { if((gcd(m,n)==1)) { ena.r = n; ema.r = m; den.r = norg(ema*z+ena); out = out + powc(yu,s)/powc(den,s/hav); } } } } return out; } __device__ cuComplex thu3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex trev(cuComplex lav, cuComplex mel, cuComplex rel) { cuComplex out(0.0,0.0); cuComplex V(0.739085133215160641655312087674,0.0); int v; for(v=0;v<3;v++) { lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav); out = out + mel*(cosc(lav)-powc(V,mel)); } return out; } __device__ cuComplex polylog(cuComplex z, cuComplex s) { cuComplex out(0.0,0.0); cuComplex oom(1.0,0.0); cuComplex flag=oom; int v; for(v=0;v<30;v++) { flag = flag + oom; out = out + powc(z,flag)/powc(flag,s); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 4.1; float fx = scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(1.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(0.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex gren(2.0,0.0); cuComplex next=flurn; cuComplex current = cue; cuComplex xnext = flurn; cuComplex xcurrent = cue; cuComplex rue=cue; cuComplex tinny(.0001,0.0001); cuComplex raga(0.5,27.0); cuComplex ruga(0.5,54.0); cuComplex senna(0.5,0.0); cuComplex finch(0.001,.001); float ah, ex, feig; feig = 3.67; ex = 2.10981; float xa,xb,ya,yb,tta,ttb; /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; int uu; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // One way of describing this would be we want to perform Newton's method //on the Mandelbrot set /* preiterate */ //tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s // this is not terribly hard to do with cuda // what we need: // x' = x - y -> dx / dt = x - y // y' = 1 - x^2 -> dy / dt = 1-x^2 // dy / dx = (dy / dt) / (dx/ dt) // so the trick is to convert dy/dx into a unit complex number to make this work, okay 
that's not that difficult for(v=1;v<30;v++) { cue = cue - expc((hylva(cue)-aon)/(hylva(cue)-uon)); } /*cue = accume;*/ /*if(norg(q)>1.0)*/ /*{*/ /* d_out[i].x = 0;*/ /* d_out[i].y = 0;*/ /* d_out[i].z = 0;*/ /* */ /* d_out[i].w = 255;*/ /**/ /*}*/ /*else*/ { double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
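// --- Editor's sketch (illustrative only, not part of the original .cu) -----
// One way the host might drive kernelLauncher() defined above. Only DIM,
// uchar4, int2 and kernelLauncher() come from this file; the function and
// buffer names here are assumptions for the example.
void renderFrame(uchar4 *h_pixels, int mouseX, int mouseY)
{
    const size_t bytes = static_cast<size_t>(DIM) * DIM * sizeof(uchar4);
    uchar4 *d_out = 0;
    cudaMalloc((void**)&d_out, bytes);             // device framebuffer, DIM x DIM pixels
    const int2 pos = make_int2(mouseX, mouseY);    // mapped to the complex plane in the kernel
    kernelLauncher(d_out, DIM, DIM, pos);          // grid of TX x TY blocks covers every pixel
    cudaMemcpy(h_pixels, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_out);
}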
80324eb7801ce9fae60ce4a16d64fc4a8bdd123c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define THREADS 5 #define BLOCKS 1 __global__ void testFunction(float *dev_a, float *dev_b, float dev_c, float *dev_d) { int thread = threadIdx.x; if(thread == 0) { printf("dev[%d] = %.2f;\n", thread, dev_a[thread]); printf("b = %.2f;\n", *dev_b); printf("c 1 = %.2f;\n", dev_c); dev_c = dev_c*dev_c; printf("c 2 = %.2f;\n", dev_c); for(int i = 0; i<THREADS; i++) { printf("dev_d[%d] = %.2f; ", i, dev_d[i]); } printf("\nNOT WORKING!\n"); } } int main() { float a[THREADS] = { 1, 2, 3, 4, 5 }; float d[THREADS] = { 6, 7, 8, 9, 10 }; printf("BEFORE START 1\n"); for(int i = 0; i<THREADS; i++) { printf("a[%d] = %.2f; ", i, a[i]); } printf("\nBEFORE END 2\n"); printf("BEFORE START 2\n"); for(int i = 0; i<THREADS; i++) { printf("d[%d] = %.2f; ", i, d[i]); } printf("\nBEFORE END 2\n"); float *dev_a; hipMalloc((void**)&dev_a, THREADS*sizeof(float)); hipMemcpy(dev_a, a, THREADS*sizeof(float), hipMemcpyHostToDevice); float b = 25; float *dev_b; hipMalloc((void**)&dev_b, sizeof(float)); hipMemcpy(dev_b, &b, sizeof(float), hipMemcpyHostToDevice); float c = 77; hipLaunchKernelGGL(( testFunction), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_a, dev_b, c, d); hipFree(dev_a); hipFree(dev_b); printf("after kernel: c = %.2f;\n", c); return 0; }
80324eb7801ce9fae60ce4a16d64fc4a8bdd123c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> #define THREADS 5 #define BLOCKS 1 __global__ void testFunction(float *dev_a, float *dev_b, float dev_c, float *dev_d) { int thread = threadIdx.x; if(thread == 0) { printf("dev[%d] = %.2f;\n", thread, dev_a[thread]); printf("b = %.2f;\n", *dev_b); printf("c 1 = %.2f;\n", dev_c); dev_c = dev_c*dev_c; printf("c 2 = %.2f;\n", dev_c); for(int i = 0; i<THREADS; i++) { printf("dev_d[%d] = %.2f; ", i, dev_d[i]); } printf("\nNOT WORKING!\n"); } } int main() { float a[THREADS] = { 1, 2, 3, 4, 5 }; float d[THREADS] = { 6, 7, 8, 9, 10 }; printf("BEFORE START 1\n"); for(int i = 0; i<THREADS; i++) { printf("a[%d] = %.2f; ", i, a[i]); } printf("\nBEFORE END 2\n"); printf("BEFORE START 2\n"); for(int i = 0; i<THREADS; i++) { printf("d[%d] = %.2f; ", i, d[i]); } printf("\nBEFORE END 2\n"); float *dev_a; cudaMalloc((void**)&dev_a, THREADS*sizeof(float)); cudaMemcpy(dev_a, a, THREADS*sizeof(float), cudaMemcpyHostToDevice); float b = 25; float *dev_b; cudaMalloc((void**)&dev_b, sizeof(float)); cudaMemcpy(dev_b, &b, sizeof(float), cudaMemcpyHostToDevice); float c = 77; testFunction<<<BLOCKS, THREADS>>>(dev_a, dev_b, c, d); cudaFree(dev_a); cudaFree(dev_b); printf("after kernel: c = %.2f;\n", c); return 0; }
e98d10587cd805a8ee4fc3296d273c655115e7f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <climits> #include <cfloat> #include <math_constants.h> #include "cudarray/common.hpp" #include "cudarray/reduction.hpp" #include "cudarray/elementwise.hpp" // The parallel reductions below are heavily based on // http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // and http://cudpp.github.io/ // TODO: parallelize reduce_to_int() and reduce_mat() la reduce() namespace cudarray { template <typename T> struct SharedMemory { __device__ T* pointer() const; }; template <> __device__ inline int *SharedMemory<int>::pointer() const { extern __shared__ int s_int[]; return s_int; } template <> __device__ inline float *SharedMemory<float>::pointer() const { extern __shared__ float s_float[]; return s_float; } template <typename T> struct MaxOp { __device__ T identity() const; __device__ T operator()(const T a, const T b) { return max(a, b); } }; template <> __device__ inline int MaxOp<int>::identity() const { return INT_MIN; } template <> __device__ inline float MaxOp<float>::identity() const { return -FLT_MAX; } template <typename T> struct MinOp { __device__ T identity() const; __device__ T operator()(const T a, const T b) { return min(a, b); } }; template <> __device__ inline int MinOp<int>::identity() const { return INT_MAX; } template <> __device__ inline float MinOp<float>::identity() const { return FLT_MAX; } template <typename T> struct MulOp { __device__ T identity() { return (T) 1; } __device__ T operator()(const T a, const T b) { return a * b; } }; template <typename T> struct AddOp { __device__ T identity() { return (T) 0; } __device__ T operator()(const T a, const T b) { return a + b; } }; template <typename T, typename Op, unsigned int block_size> __global__ void reduce(const T *a, unsigned int n, T *b) { Op op; if (block_size == 1) { if (n == 1) { b[0] = a[0]; } else if (n == 2) { b[0] = op(a[0], a[1]); } } else { unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(block_size*2) + threadIdx.x; unsigned int gridSize = block_size*2*gridDim.x; SharedMemory<T> smem; volatile T* sdata = smem.pointer(); T reduced = op.identity(); // Reduce multiple elements per thread. while (i < n) { reduced = op(reduced, a[i]); // Check array bounds if (i + block_size < n) { reduced = op(reduced, a[i+block_size]); } i += gridSize; } // Reduce in shared memory sdata[tid] = reduced; __syncthreads(); #pragma unroll for (unsigned int i=512; i >= 2; i >>= 1) { if (block_size >= i) { if (tid < (i << 1)) { sdata[tid] = reduced = op(reduced, sdata[tid + i]); } // No need to sync threads in the same warp if (tid >= 32) __syncthreads(); } } // Write reduced block back to global memory if (tid == 0) { b[blockIdx.x] = sdata[0]; } } } const unsigned int max_blocks = 64; const unsigned int reduce_cta_size = 256; inline unsigned int ceil_pow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } unsigned int n_reduce_blocks(unsigned int n) { return min(max_blocks, (n + (2*reduce_cta_size - 1)) / (2*reduce_cta_size)); } unsigned int n_reduce_threads(unsigned int n) { return n > 2 * reduce_cta_size ? 
reduce_cta_size : max(1, ceil_pow2(n) / 2); } template <typename T, typename Op> void reduce_blocks(const T *a, unsigned int n, T *b) { unsigned int n_threads = n_reduce_threads(n); dim3 block(n_threads, 1, 1); unsigned int n_blocks = n_reduce_blocks(n); dim3 grid(n_blocks, 1, 1); int smem_size = reduce_cta_size * sizeof(T); switch (block.x) { case 512: hipLaunchKernelGGL(( reduce<T, Op, 512>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 256: hipLaunchKernelGGL(( reduce<T, Op, 256>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 128: hipLaunchKernelGGL(( reduce<T, Op, 128>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 64: hipLaunchKernelGGL(( reduce<T, Op, 64>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 32: hipLaunchKernelGGL(( reduce<T, Op, 32>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 16: hipLaunchKernelGGL(( reduce<T, Op, 16>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 8: hipLaunchKernelGGL(( reduce<T, Op, 8>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 4: hipLaunchKernelGGL(( reduce<T, Op, 4>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 2: hipLaunchKernelGGL(( reduce<T, Op, 2>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; case 1: hipLaunchKernelGGL(( reduce<T, Op, 1>), dim3(grid), dim3(block), smem_size, 0, a, n, b); break; } } template <typename T, typename Op> void reduce(const T *a, unsigned int n, T *b) { unsigned int n_blocks = n_reduce_blocks(n); if (n_blocks > 1) { T *buf = (T *) CUDA::buffer(n_blocks*sizeof(T)); reduce_blocks<T, Op>(a, n, buf); reduce_blocks<T, Op>(buf, n_blocks, b); } else { reduce_blocks<T, Op>(a, n, b); } } template<typename T> void reduce(ReduceOp op, const T *a, unsigned int n, T *b) { switch (op) { case MAX_OP: reduce<T, MaxOp<T> >(a, n, b); break; case MEAN_OP: reduce<T, AddOp<T> >(a, n, b); binary_scalar(DIV_OP, b, (T) n, 1, b); break; case MIN_OP: reduce<T, MinOp<T> >(a, n, b); break; case SUM_OP: reduce<T, AddOp<T> >(a, n, b); break; } } template void reduce<float>(ReduceOp op, const float *a, unsigned int n, float *b); template void reduce<int>(ReduceOp op, const int *a, unsigned int n, int *b); #define REDUCE_OP(name, ident_f, ident_i, reduce_op, scale_op, select_op) \ template <typename Tb> \ struct name; \ template <> \ struct name<float> { \ __device__ inline static float identity() { \ return ident_f; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void reduce(volatile Ta a, volatile int idx, \ volatile Tb &b, volatile int &b_idx) { \ reduce_op; \ } \ template <typename Tb> \ __device__ inline static void scale(volatile Tb &b, volatile float n) { \ scale_op; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void select(volatile Tb &b, volatile Ta a, \ volatile int idx) { \ select_op; \ } \ }; \ template <> \ struct name<int> { \ __device__ inline static int identity() { \ return ident_i; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void reduce(volatile Ta a, volatile int idx, \ volatile Tb &b, volatile int &b_idx) { \ reduce_op; \ } \ template <typename Tb> \ __device__ inline static void scale(volatile Tb &b, volatile float n) { \ scale_op; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void select(volatile Tb &b, volatile Ta a, \ volatile int idx) { \ select_op; \ } \ }; REDUCE_OP(max_op, -FLT_MAX, INT_MIN, if (a > b) b = a, , b = a) REDUCE_OP(mean_op, 0.0f, 0, b += a, b /= n, b = a) REDUCE_OP(min_op, 
FLT_MAX, INT_MAX, if (a < b) b = a, , b = a) REDUCE_OP(sum_op, 0.0f, 0, b += a, , b = a) REDUCE_OP(argmax_op, -FLT_MAX, INT_MIN, if (a > b) {b = a; b_idx=idx;}, , b = idx) REDUCE_OP(argmin_op, FLT_MAX, INT_MAX, if (a < b) {b = a; b_idx=idx;}, , b = idx) template <typename Ta, typename Tb, typename Op> __global__ void kernel_reduce(const Ta *a, unsigned int n, Tb *b) { CUDA_GRID_STRIDE_LOOP(idx, 1) { Ta a_ = Op::identity(); int idx_ = 0; for (unsigned int i = 0; i < n; ++i) { Op::reduce(*a, i, a_, idx_); ++a; } Op::scale(a_, n); Op::select(*b, a_, idx_); } } template <typename Ta, typename Tb, typename Op> void reduce(const Ta *a, unsigned int n, Tb *b) { hipLaunchKernelGGL(( kernel_reduce<Ta, Tb, Op>), dim3(cuda_blocks(1)), dim3(kNumBlockThreads), 0, 0, a, n, b); } template<typename T> void reduce_to_int(ReduceToIntOp op, const T *a, unsigned int n, int *b) { switch (op) { case ARGMAX_OP: reduce<T, int, argmax_op<T> >(a, n, b); break; case ARGMIN_OP: reduce<T, int, argmin_op<T> >(a, n, b); break; } } template void reduce_to_int<float>(ReduceToIntOp op, const float *a, unsigned int n, int *b); template void reduce_to_int<int>(ReduceToIntOp op, const int *a, unsigned int n, int *b); template <typename Ta, typename Tb, typename Op, bool reduce_leading> __global__ void kernel_reduce_mat(const Ta *a, unsigned int m, unsigned int n, Tb *b) { unsigned int n_threads; if (reduce_leading) { n_threads = n; } else { n_threads = m; } CUDA_GRID_STRIDE_LOOP(idx, n_threads) { if (reduce_leading) { a += idx; b += idx; } else { a += idx * n; b += idx; } Ta a_ = Op::identity(); int idx_ = 0; if (reduce_leading) { for (unsigned int i = 0; i < m; ++i) { Op::reduce(*a, i, a_, idx_); a += n; } } else { for (unsigned int i = 0; i < n; ++i) { Op::reduce(*a, i, a_, idx_); ++a; } } if (reduce_leading) { Op::scale(a_, m); } else { Op::scale(a_, n); } Op::select(*b, a_, idx_); } } template<typename Ta, typename Tb, typename Op> void reduce_mat(const Ta *a, unsigned int m, unsigned int n, bool reduce_leading, Tb *b) { if (reduce_leading) { hipLaunchKernelGGL(( kernel_reduce_mat<Ta, Tb, Op, true>), dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, a, m, n, b); } else { hipLaunchKernelGGL(( kernel_reduce_mat<Ta, Tb, Op, false>), dim3(cuda_blocks(m)), dim3(kNumBlockThreads), 0, 0, a, m, n, b); } } template<typename T> void reduce_mat(ReduceOp op, const T *a, unsigned int m, unsigned int n, bool reduce_leading, T *b) { switch (op) { case MAX_OP: reduce_mat<T, T, max_op<T> >(a, m, n, reduce_leading, b); break; case MEAN_OP: reduce_mat<T, T, mean_op<T> >(a, m, n, reduce_leading, b); break; case MIN_OP: reduce_mat<T, T, min_op<T> >(a, m, n, reduce_leading, b); break; case SUM_OP: reduce_mat<T, T, sum_op<T> >(a, m, n, reduce_leading, b); break; } } template void reduce_mat<float>(ReduceOp op, const float *a, unsigned int m, unsigned int n, bool reduce_leading, float *b); template void reduce_mat<int>(ReduceOp op, const int *a, unsigned int m, unsigned int n, bool reduce_leading, int *b); template<typename T> void reduce_mat_to_int(ReduceToIntOp op, const T *a, unsigned int m, unsigned int n, bool reduce_leading, int *b) { switch (op) { case ARGMAX_OP: reduce_mat<T, int, argmax_op<T> >(a, m, n, reduce_leading, b); break; case ARGMIN_OP: reduce_mat<T, int, argmin_op<T> >(a, m, n, reduce_leading, b); break; } } template void reduce_mat_to_int<float>(ReduceToIntOp op, const float *a, unsigned int m, unsigned int n, bool reduce_leading, int *b); template void reduce_mat_to_int<int>(ReduceToIntOp op, const int *a, unsigned int m, 
unsigned int n, bool reduce_leading, int *b); }
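// --- Editor's note (not part of the original hipified file) -----------------
// The shared-memory phase of the block reduction above guards with
// tid < (i << 1) while adding sdata[tid + i] and only lets threads with
// tid >= 32 reach __syncthreads(), which looks suspect (possible reads past
// the reduce_cta_size smem allocation and a barrier not reached by all
// threads). For comparison, the plain tree reduction from the NVIDIA
// whitepaper cited at the top of this file is sketched below; it assumes
// blockDim.x is a power of two, which n_reduce_threads() guarantees. This is
// a reference sketch, not a drop-in replacement for the templated kernel.
//
//     sdata[tid] = reduced;
//     __syncthreads();
//     for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
//     {
//         if (tid < s)
//         {
//             sdata[tid] = op(sdata[tid], sdata[tid + s]);
//         }
//         __syncthreads();    // every thread reaches the barrier each round
//     }
//     if (tid == 0)
//     {
//         b[blockIdx.x] = sdata[0];
//     }
//
// The whitepaper's faster variant drops the barrier only once the active
// span fits inside a single warp (s <= 32) and relies on sdata being
// declared volatile, as it is above.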
e98d10587cd805a8ee4fc3296d273c655115e7f6.cu
#include <climits> #include <cfloat> #include <math_constants.h> #include "cudarray/common.hpp" #include "cudarray/reduction.hpp" #include "cudarray/elementwise.hpp" // The parallel reductions below are heavily based on // http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // and http://cudpp.github.io/ // TODO: parallelize reduce_to_int() and reduce_mat() à la reduce() namespace cudarray { template <typename T> struct SharedMemory { __device__ T* pointer() const; }; template <> __device__ inline int *SharedMemory<int>::pointer() const { extern __shared__ int s_int[]; return s_int; } template <> __device__ inline float *SharedMemory<float>::pointer() const { extern __shared__ float s_float[]; return s_float; } template <typename T> struct MaxOp { __device__ T identity() const; __device__ T operator()(const T a, const T b) { return max(a, b); } }; template <> __device__ inline int MaxOp<int>::identity() const { return INT_MIN; } template <> __device__ inline float MaxOp<float>::identity() const { return -FLT_MAX; } template <typename T> struct MinOp { __device__ T identity() const; __device__ T operator()(const T a, const T b) { return min(a, b); } }; template <> __device__ inline int MinOp<int>::identity() const { return INT_MAX; } template <> __device__ inline float MinOp<float>::identity() const { return FLT_MAX; } template <typename T> struct MulOp { __device__ T identity() { return (T) 1; } __device__ T operator()(const T a, const T b) { return a * b; } }; template <typename T> struct AddOp { __device__ T identity() { return (T) 0; } __device__ T operator()(const T a, const T b) { return a + b; } }; template <typename T, typename Op, unsigned int block_size> __global__ void reduce(const T *a, unsigned int n, T *b) { Op op; if (block_size == 1) { if (n == 1) { b[0] = a[0]; } else if (n == 2) { b[0] = op(a[0], a[1]); } } else { unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(block_size*2) + threadIdx.x; unsigned int gridSize = block_size*2*gridDim.x; SharedMemory<T> smem; volatile T* sdata = smem.pointer(); T reduced = op.identity(); // Reduce multiple elements per thread. while (i < n) { reduced = op(reduced, a[i]); // Check array bounds if (i + block_size < n) { reduced = op(reduced, a[i+block_size]); } i += gridSize; } // Reduce in shared memory sdata[tid] = reduced; __syncthreads(); #pragma unroll for (unsigned int i=512; i >= 2; i >>= 1) { if (block_size >= i) { if (tid < (i << 1)) { sdata[tid] = reduced = op(reduced, sdata[tid + i]); } // No need to sync threads in the same warp if (tid >= 32) __syncthreads(); } } // Write reduced block back to global memory if (tid == 0) { b[blockIdx.x] = sdata[0]; } } } const unsigned int max_blocks = 64; const unsigned int reduce_cta_size = 256; inline unsigned int ceil_pow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } unsigned int n_reduce_blocks(unsigned int n) { return min(max_blocks, (n + (2*reduce_cta_size - 1)) / (2*reduce_cta_size)); } unsigned int n_reduce_threads(unsigned int n) { return n > 2 * reduce_cta_size ? 
reduce_cta_size : max(1, ceil_pow2(n) / 2); } template <typename T, typename Op> void reduce_blocks(const T *a, unsigned int n, T *b) { unsigned int n_threads = n_reduce_threads(n); dim3 block(n_threads, 1, 1); unsigned int n_blocks = n_reduce_blocks(n); dim3 grid(n_blocks, 1, 1); int smem_size = reduce_cta_size * sizeof(T); switch (block.x) { case 512: reduce<T, Op, 512><<<grid, block, smem_size>>>(a, n, b); break; case 256: reduce<T, Op, 256><<<grid, block, smem_size>>>(a, n, b); break; case 128: reduce<T, Op, 128><<<grid, block, smem_size>>>(a, n, b); break; case 64: reduce<T, Op, 64><<<grid, block, smem_size>>>(a, n, b); break; case 32: reduce<T, Op, 32><<<grid, block, smem_size>>>(a, n, b); break; case 16: reduce<T, Op, 16><<<grid, block, smem_size>>>(a, n, b); break; case 8: reduce<T, Op, 8><<<grid, block, smem_size>>>(a, n, b); break; case 4: reduce<T, Op, 4><<<grid, block, smem_size>>>(a, n, b); break; case 2: reduce<T, Op, 2><<<grid, block, smem_size>>>(a, n, b); break; case 1: reduce<T, Op, 1><<<grid, block, smem_size>>>(a, n, b); break; } } template <typename T, typename Op> void reduce(const T *a, unsigned int n, T *b) { unsigned int n_blocks = n_reduce_blocks(n); if (n_blocks > 1) { T *buf = (T *) CUDA::buffer(n_blocks*sizeof(T)); reduce_blocks<T, Op>(a, n, buf); reduce_blocks<T, Op>(buf, n_blocks, b); } else { reduce_blocks<T, Op>(a, n, b); } } template<typename T> void reduce(ReduceOp op, const T *a, unsigned int n, T *b) { switch (op) { case MAX_OP: reduce<T, MaxOp<T> >(a, n, b); break; case MEAN_OP: reduce<T, AddOp<T> >(a, n, b); binary_scalar(DIV_OP, b, (T) n, 1, b); break; case MIN_OP: reduce<T, MinOp<T> >(a, n, b); break; case SUM_OP: reduce<T, AddOp<T> >(a, n, b); break; } } template void reduce<float>(ReduceOp op, const float *a, unsigned int n, float *b); template void reduce<int>(ReduceOp op, const int *a, unsigned int n, int *b); #define REDUCE_OP(name, ident_f, ident_i, reduce_op, scale_op, select_op) \ template <typename Tb> \ struct name; \ template <> \ struct name<float> { \ __device__ inline static float identity() { \ return ident_f; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void reduce(volatile Ta a, volatile int idx, \ volatile Tb &b, volatile int &b_idx) { \ reduce_op; \ } \ template <typename Tb> \ __device__ inline static void scale(volatile Tb &b, volatile float n) { \ scale_op; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void select(volatile Tb &b, volatile Ta a, \ volatile int idx) { \ select_op; \ } \ }; \ template <> \ struct name<int> { \ __device__ inline static int identity() { \ return ident_i; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void reduce(volatile Ta a, volatile int idx, \ volatile Tb &b, volatile int &b_idx) { \ reduce_op; \ } \ template <typename Tb> \ __device__ inline static void scale(volatile Tb &b, volatile float n) { \ scale_op; \ } \ template <typename Ta, typename Tb> \ __device__ inline static void select(volatile Tb &b, volatile Ta a, \ volatile int idx) { \ select_op; \ } \ }; REDUCE_OP(max_op, -FLT_MAX, INT_MIN, if (a > b) b = a, , b = a) REDUCE_OP(mean_op, 0.0f, 0, b += a, b /= n, b = a) REDUCE_OP(min_op, FLT_MAX, INT_MAX, if (a < b) b = a, , b = a) REDUCE_OP(sum_op, 0.0f, 0, b += a, , b = a) REDUCE_OP(argmax_op, -FLT_MAX, INT_MIN, if (a > b) {b = a; b_idx=idx;}, , b = idx) REDUCE_OP(argmin_op, FLT_MAX, INT_MAX, if (a < b) {b = a; b_idx=idx;}, , b = idx) template <typename Ta, typename Tb, typename Op> __global__ void kernel_reduce(const Ta 
*a, unsigned int n, Tb *b) { CUDA_GRID_STRIDE_LOOP(idx, 1) { Ta a_ = Op::identity(); int idx_ = 0; for (unsigned int i = 0; i < n; ++i) { Op::reduce(*a, i, a_, idx_); ++a; } Op::scale(a_, n); Op::select(*b, a_, idx_); } } template <typename Ta, typename Tb, typename Op> void reduce(const Ta *a, unsigned int n, Tb *b) { kernel_reduce<Ta, Tb, Op><<<cuda_blocks(1), kNumBlockThreads>>>(a, n, b); } template<typename T> void reduce_to_int(ReduceToIntOp op, const T *a, unsigned int n, int *b) { switch (op) { case ARGMAX_OP: reduce<T, int, argmax_op<T> >(a, n, b); break; case ARGMIN_OP: reduce<T, int, argmin_op<T> >(a, n, b); break; } } template void reduce_to_int<float>(ReduceToIntOp op, const float *a, unsigned int n, int *b); template void reduce_to_int<int>(ReduceToIntOp op, const int *a, unsigned int n, int *b); template <typename Ta, typename Tb, typename Op, bool reduce_leading> __global__ void kernel_reduce_mat(const Ta *a, unsigned int m, unsigned int n, Tb *b) { unsigned int n_threads; if (reduce_leading) { n_threads = n; } else { n_threads = m; } CUDA_GRID_STRIDE_LOOP(idx, n_threads) { if (reduce_leading) { a += idx; b += idx; } else { a += idx * n; b += idx; } Ta a_ = Op::identity(); int idx_ = 0; if (reduce_leading) { for (unsigned int i = 0; i < m; ++i) { Op::reduce(*a, i, a_, idx_); a += n; } } else { for (unsigned int i = 0; i < n; ++i) { Op::reduce(*a, i, a_, idx_); ++a; } } if (reduce_leading) { Op::scale(a_, m); } else { Op::scale(a_, n); } Op::select(*b, a_, idx_); } } template<typename Ta, typename Tb, typename Op> void reduce_mat(const Ta *a, unsigned int m, unsigned int n, bool reduce_leading, Tb *b) { if (reduce_leading) { kernel_reduce_mat<Ta, Tb, Op, true><<<cuda_blocks(n), kNumBlockThreads>>> (a, m, n, b); } else { kernel_reduce_mat<Ta, Tb, Op, false><<<cuda_blocks(m), kNumBlockThreads>>> (a, m, n, b); } } template<typename T> void reduce_mat(ReduceOp op, const T *a, unsigned int m, unsigned int n, bool reduce_leading, T *b) { switch (op) { case MAX_OP: reduce_mat<T, T, max_op<T> >(a, m, n, reduce_leading, b); break; case MEAN_OP: reduce_mat<T, T, mean_op<T> >(a, m, n, reduce_leading, b); break; case MIN_OP: reduce_mat<T, T, min_op<T> >(a, m, n, reduce_leading, b); break; case SUM_OP: reduce_mat<T, T, sum_op<T> >(a, m, n, reduce_leading, b); break; } } template void reduce_mat<float>(ReduceOp op, const float *a, unsigned int m, unsigned int n, bool reduce_leading, float *b); template void reduce_mat<int>(ReduceOp op, const int *a, unsigned int m, unsigned int n, bool reduce_leading, int *b); template<typename T> void reduce_mat_to_int(ReduceToIntOp op, const T *a, unsigned int m, unsigned int n, bool reduce_leading, int *b) { switch (op) { case ARGMAX_OP: reduce_mat<T, int, argmax_op<T> >(a, m, n, reduce_leading, b); break; case ARGMIN_OP: reduce_mat<T, int, argmin_op<T> >(a, m, n, reduce_leading, b); break; } } template void reduce_mat_to_int<float>(ReduceToIntOp op, const float *a, unsigned int m, unsigned int n, bool reduce_leading, int *b); template void reduce_mat_to_int<int>(ReduceToIntOp op, const int *a, unsigned int m, unsigned int n, bool reduce_leading, int *b); }
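// --- Editor's sketch (illustrative only, not part of the original .cu) ------
// Driving the public entry point reduce(ReduceOp, const T*, unsigned int, T*)
// instantiated above. The cudarray:: qualification assumes ReduceOp and the
// op constants are declared inside the cudarray namespace by
// cudarray/reduction.hpp (not shown here); d_in, d_out, h_data and h_sum are
// hypothetical names for the example.
//
//     float *d_in, *d_out;
//     cudaMalloc((void**)&d_in,  n * sizeof(float));
//     cudaMalloc((void**)&d_out, sizeof(float));
//     cudaMemcpy(d_in, h_data, n * sizeof(float), cudaMemcpyHostToDevice);
//     cudarray::reduce(cudarray::SUM_OP, d_in, n, d_out);  // d_out[0] = sum of d_in[0..n)
//     float h_sum;
//     cudaMemcpy(&h_sum, d_out, sizeof(float), cudaMemcpyDeviceToHost);
//     cudaFree(d_in);
//     cudaFree(d_out);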
3e982cbfd0323fb28011e88e51497b65046a7dc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ModulationRemovalKernel.h" #include "taco2Utils.h" #include <algorithm> #include <cassert> #include <cfloat> namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int WINDOW_SIZE = 1024; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ __global__ void modulationRemovalKernel(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int hopLength, const float scale) { // load weights into shared memory __shared__ float localWeights[WINDOW_SIZE]; for (int i = threadIdx.x; i < WINDOW_SIZE; i += blockDim.x) { localWeights[i] = weightsDevice[i]; } __syncthreads(); const int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < inputLength - WINDOW_SIZE) { const int inIdx = idx + (WINDOW_SIZE / 2); // start the window over the first overlap, and slide it until the last // overlap for this point float sum = 0.0f; const int windowOffset = inIdx % hopLength; for (int j = windowOffset; j < WINDOW_SIZE; j += hopLength) { if (inIdx - j >= 0) { sum += localWeights[j]; } } // normal all non-zero values for (int i = 0; i < batchSize; ++i) { float val = inputDevice[inIdx + inputLength * i]; if (sum > FLT_MIN) { val /= sum; } val *= scale; outputDevice[idx + (inputLength - WINDOW_SIZE) * i] = val; } } } /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** 
*****************************************************************************/ void Taco2ModulationRemovalKernel::compute(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int filterLength, const int hopLength, hipStream_t stream) { assert(filterLength == WINDOW_SIZE); const dim3 grid(taco2::Taco2Utils::roundUpBlocks(inputLength - filterLength, WINDOW_SIZE)); const dim3 block(WINDOW_SIZE); hipLaunchKernelGGL(( modulationRemovalKernel), dim3(grid), dim3(block), 0, stream, batchSize, weightsDevice, inputDevice, outputDevice, inputLength, hopLength, static_cast<float>(filterLength) / static_cast<float>(hopLength)); } } // namespace plugin } // namespace nvinfer1
3e982cbfd0323fb28011e88e51497b65046a7dc5.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ModulationRemovalKernel.h" #include "taco2Utils.h" #include <algorithm> #include <cassert> #include <cfloat> namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int WINDOW_SIZE = 1024; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ __global__ void modulationRemovalKernel(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int hopLength, const float scale) { // load weights into shared memory __shared__ float localWeights[WINDOW_SIZE]; for (int i = threadIdx.x; i < WINDOW_SIZE; i += blockDim.x) { localWeights[i] = weightsDevice[i]; } __syncthreads(); const int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < inputLength - WINDOW_SIZE) { const int inIdx = idx + (WINDOW_SIZE / 2); // start the window over the first overlap, and slide it until the last // overlap for this point float sum = 0.0f; const int windowOffset = inIdx % hopLength; for (int j = windowOffset; j < WINDOW_SIZE; j += hopLength) { if (inIdx - j >= 0) { sum += localWeights[j]; } } // normal all non-zero values for (int i = 0; i < batchSize; ++i) { float val = inputDevice[inIdx + inputLength * i]; if (sum > FLT_MIN) { val /= sum; } val *= scale; outputDevice[idx + (inputLength - WINDOW_SIZE) * i] = val; } } } /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void 
Taco2ModulationRemovalKernel::compute(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int filterLength, const int hopLength, cudaStream_t stream) { assert(filterLength == WINDOW_SIZE); const dim3 grid(taco2::Taco2Utils::roundUpBlocks(inputLength - filterLength, WINDOW_SIZE)); const dim3 block(WINDOW_SIZE); modulationRemovalKernel<<<grid, block, 0, stream>>>(batchSize, weightsDevice, inputDevice, outputDevice, inputLength, hopLength, static_cast<float>(filterLength) / static_cast<float>(hopLength)); } } // namespace plugin } // namespace nvinfer1
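Neither half of this pair ships a host-side driver, so the following is a minimal, hypothetical sketch of how Taco2ModulationRemovalKernel::compute() could be invoked. Only the compute() signature and the fact that each batch item produces inputLength - filterLength output samples come from the code above; the runModulationRemoval helper, the buffer names, and the omission of error checking are assumptions for illustration.

// Hypothetical driver for Taco2ModulationRemovalKernel::compute() -- a sketch, not part of the plugin.
#include <cuda_runtime.h>
#include <vector>
#include "taco2ModulationRemovalKernel.h"

void runModulationRemoval(const std::vector<float>& weights, // filterLength (1024) window values
                          const std::vector<float>& input,   // batchSize * inputLength samples
                          std::vector<float>& output,        // batchSize * (inputLength - filterLength) samples
                          int batchSize, int inputLength, int filterLength, int hopLength)
{
    float *dWeights, *dInput, *dOutput;
    cudaMalloc(&dWeights, weights.size() * sizeof(float));
    cudaMalloc(&dInput, input.size() * sizeof(float));
    cudaMalloc(&dOutput, output.size() * sizeof(float));
    cudaMemcpy(dWeights, weights.data(), weights.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dInput, input.data(), input.size() * sizeof(float), cudaMemcpyHostToDevice);

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // launches the modulation-removal kernel shown above on the given stream
    nvinfer1::plugin::Taco2ModulationRemovalKernel::compute(
        batchSize, dWeights, dInput, dOutput, inputLength, filterLength, hopLength, stream);
    cudaMemcpyAsync(output.data(), dOutput, output.size() * sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    cudaFree(dWeights);
    cudaFree(dInput);
    cudaFree(dOutput);
}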
a9e17c8eaaa46ae5a7eb9874d39b5055473675d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Histogram Equalization #include <wb.h> #define cudaCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ std::cerr << "Failed to run stmt " #stmt \ << " on line " << __LINE__ \ << ": " << hipGetErrorString(err) << "\n"; \ exit(-1); \ } \ } while(0) #define BLOCK_WIDTH 8 #define HISTOGRAM_LENGTH 256 //@@ insert code herea __global__ void toChar( const float* data, uint8_t* output, int len ) { int index = threadIdx.x + blockIdx.x * blockDim.x; output[ index ] = static_cast<unsigned char>(255*data[index]); } __global__ void RGB2Grayscale( const uint8_t* input, int width, int height , uint8_t* output ) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if( row < height && col < width ) { int idx = row*width + col; float r = input[ 3*idx ]; float g = input[ 3*idx + 1 ]; float b = input[ 3*idx + 2 ]; output[ idx ] = static_cast<uint8_t>(0.21*r + 0.71*g + 0.07*b); } } __global__ void histogram( const uint8_t* input, int size, int* hist ) { int index = blockIdx.x * blockDim.x + threadIdx.x; printf( "(%d:%d)\n", blockIdx.x, threadIdx.x ); if( index < size ) { printf( "(%d:%d) index (%d) - value = %d count(%d)\n", blockIdx.x, threadIdx.x, index, (int)input[index], hist[input[index]] ); atomicAdd( &hist[input[index]], 1 ); printf( "(%d:%d) count is now: %d\n", blockIdx.x, threadIdx.x, hist[input[index]] ); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float *devInputImageData; uint8_t *devCharImage, *devGrayscaleImageData; int *devHistogram; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); wbTime_stop(Generic, "Importing data and creating memory on host"); hostInputImageData = wbImage_getData( inputImage ); hostOutputImageData = wbImage_getData( inputImage ); hipMalloc( &devInputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) ); hipMalloc( &devCharImage , imageWidth*imageHeight*imageChannels * sizeof(uint8_t) ); hipMemcpy( devInputImageData, hostInputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) , hipMemcpyHostToDevice ); std::cout << "imageHeight: " << imageHeight << " imageWidth: " << imageWidth << std::endl; { dim3 dBlock( BLOCK_WIDTH, 1, 1 ); dim3 dGrid( ((imageHeight*imageWidth*imageChannels)-1)/BLOCK_WIDTH+1, 1, 1 ); hipLaunchKernelGGL(( toChar), dim3(dGrid), dim3(dBlock) , 0, 0, devInputImageData, devCharImage, imageHeight*imageWidth*imageChannels ); cudaCheck( hipGetLastError() ); hipDeviceSynchronize(); } hipMalloc( &devGrayscaleImageData , imageWidth*imageHeight * sizeof(uint8_t) ); { dim3 dBlock( BLOCK_WIDTH, BLOCK_WIDTH, imageChannels ); std::cout << "(imageWidth-1)/BLOCK_WIDTH+1: " << (imageWidth-1)/BLOCK_WIDTH+1 << std::endl; dim3 dGrid( (imageWidth-1)/BLOCK_WIDTH+1,(imageHeight-1)/BLOCK_WIDTH+1 ); std::cout << "RGB dGrid.x: " << dGrid.x << " dGrid.y: " << dGrid.y << " dGrid.z: " << dGrid.z << std::endl; std::cout << "RGB dBlock.x: " << dBlock.x 
<< " dBlock.y: " << dBlock.y << " dBlock.z: " << dBlock.z << std::endl; hipLaunchKernelGGL(( RGB2Grayscale), dim3(dGrid), dim3(dBlock) , 0, 0, devCharImage, imageWidth, imageHeight, devGrayscaleImageData ); cudaCheck( hipGetLastError() ); hipDeviceSynchronize(); } // Copy to host /*{ uint8_t* grayscale = new uint8_t[ imageHeight*imageWidth ]; hipMemcpy( grayscale, devGrayscaleImageData, }*/ hipMalloc( &devHistogram , HISTOGRAM_LENGTH * sizeof(int) ); cudaCheck( hipMemset( devHistogram, 0, HISTOGRAM_LENGTH*sizeof(int) ) ); { dim3 dGrid( (imageHeight*imageWidth-1)/BLOCK_WIDTH+1 ); dim3 dBlock( BLOCK_WIDTH ); std::cout << "dGrid.x: " << dGrid.x << std::endl; hipLaunchKernelGGL(( histogram), dim3(256), dim3(dBlock) , 0, 0, devGrayscaleImageData, imageHeight*imageWidth, devHistogram ); cudaCheck( hipGetLastError() ); hipDeviceSynchronize(); } { int* hostHistogram = new int[ HISTOGRAM_LENGTH ]; hipMemcpy( hostHistogram, devHistogram, HISTOGRAM_LENGTH, hipMemcpyDeviceToHost ); for( int i = 0; i < HISTOGRAM_LENGTH; ++i ) { std::cout << "Bucket [" << i << "] = [" << hostHistogram[i] << "]\n"; } } /*hipMalloc( &devOutputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) );*/ wbSolution(args, outputImage); //@@ insert code here return 0; }
a9e17c8eaaa46ae5a7eb9874d39b5055473675d5.cu
// Histogram Equalization #include <wb.h> #define cudaCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ std::cerr << "Failed to run stmt " #stmt \ << " on line " << __LINE__ \ << ": " << cudaGetErrorString(err) << "\n"; \ exit(-1); \ } \ } while(0) #define BLOCK_WIDTH 8 #define HISTOGRAM_LENGTH 256 //@@ insert code herea __global__ void toChar( const float* data, uint8_t* output, int len ) { int index = threadIdx.x + blockIdx.x * blockDim.x; output[ index ] = static_cast<unsigned char>(255*data[index]); } __global__ void RGB2Grayscale( const uint8_t* input, int width, int height , uint8_t* output ) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if( row < height && col < width ) { int idx = row*width + col; float r = input[ 3*idx ]; float g = input[ 3*idx + 1 ]; float b = input[ 3*idx + 2 ]; output[ idx ] = static_cast<uint8_t>(0.21*r + 0.71*g + 0.07*b); } } __global__ void histogram( const uint8_t* input, int size, int* hist ) { int index = blockIdx.x * blockDim.x + threadIdx.x; printf( "(%d:%d)\n", blockIdx.x, threadIdx.x ); if( index < size ) { printf( "(%d:%d) index (%d) - value = %d count(%d)\n", blockIdx.x, threadIdx.x, index, (int)input[index], hist[input[index]] ); atomicAdd( &hist[input[index]], 1 ); printf( "(%d:%d) count is now: %d\n", blockIdx.x, threadIdx.x, hist[input[index]] ); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float *devInputImageData; uint8_t *devCharImage, *devGrayscaleImageData; int *devHistogram; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); wbTime_stop(Generic, "Importing data and creating memory on host"); hostInputImageData = wbImage_getData( inputImage ); hostOutputImageData = wbImage_getData( inputImage ); cudaMalloc( &devInputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) ); cudaMalloc( &devCharImage , imageWidth*imageHeight*imageChannels * sizeof(uint8_t) ); cudaMemcpy( devInputImageData, hostInputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) , cudaMemcpyHostToDevice ); std::cout << "imageHeight: " << imageHeight << " imageWidth: " << imageWidth << std::endl; { dim3 dBlock( BLOCK_WIDTH, 1, 1 ); dim3 dGrid( ((imageHeight*imageWidth*imageChannels)-1)/BLOCK_WIDTH+1, 1, 1 ); toChar<<< dGrid, dBlock >>>( devInputImageData, devCharImage, imageHeight*imageWidth*imageChannels ); cudaCheck( cudaGetLastError() ); cudaDeviceSynchronize(); } cudaMalloc( &devGrayscaleImageData , imageWidth*imageHeight * sizeof(uint8_t) ); { dim3 dBlock( BLOCK_WIDTH, BLOCK_WIDTH, imageChannels ); std::cout << "(imageWidth-1)/BLOCK_WIDTH+1: " << (imageWidth-1)/BLOCK_WIDTH+1 << std::endl; dim3 dGrid( (imageWidth-1)/BLOCK_WIDTH+1,(imageHeight-1)/BLOCK_WIDTH+1 ); std::cout << "RGB dGrid.x: " << dGrid.x << " dGrid.y: " << dGrid.y << " dGrid.z: " << dGrid.z << std::endl; std::cout << "RGB dBlock.x: " << dBlock.x << " dBlock.y: " << dBlock.y << " dBlock.z: " << dBlock.z << std::endl; RGB2Grayscale<<< dGrid, dBlock >>>( 
devCharImage, imageWidth, imageHeight, devGrayscaleImageData ); cudaCheck( cudaGetLastError() ); cudaDeviceSynchronize(); } // Copy to host /*{ uint8_t* grayscale = new uint8_t[ imageHeight*imageWidth ]; cudaMemcpy( grayscale, devGrayscaleImageData, }*/ cudaMalloc( &devHistogram , HISTOGRAM_LENGTH * sizeof(int) ); cudaCheck( cudaMemset( devHistogram, 0, HISTOGRAM_LENGTH*sizeof(int) ) ); { dim3 dGrid( (imageHeight*imageWidth-1)/BLOCK_WIDTH+1 ); dim3 dBlock( BLOCK_WIDTH ); std::cout << "dGrid.x: " << dGrid.x << std::endl; histogram<<< dGrid, dBlock >>>( devGrayscaleImageData, imageHeight*imageWidth, devHistogram ); cudaCheck( cudaGetLastError() ); cudaDeviceSynchronize(); } { int* hostHistogram = new int[ HISTOGRAM_LENGTH ]; cudaMemcpy( hostHistogram, devHistogram, HISTOGRAM_LENGTH * sizeof(int), cudaMemcpyDeviceToHost ); for( int i = 0; i < HISTOGRAM_LENGTH; ++i ) { std::cout << "Bucket [" << i << "] = [" << hostHistogram[i] << "]\n"; } } /*cudaMalloc( &devOutputImageData , imageWidth*imageHeight*imageChannels * sizeof(float) );*/ wbSolution(args, outputImage); //@@ insert code here return 0; }
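Both versions of this assignment stop right after the grayscale histogram is built (the //@@ insert code here markers remain). Purely as a hedged sketch, the usual finishing step looks like the host-side helper below; it assumes HISTOGRAM_LENGTH from the file is in scope, that devHistogram and devCharImage have already been copied back into the host buffers named here, and the helper name equalizeOnHost is not part of the original code.

// Hypothetical finishing step for the histogram-equalization assignment above (host-side sketch).
#include <algorithm>
#include <cstdint>

void equalizeOnHost(const int* hostHistogram,      // HISTOGRAM_LENGTH bins copied back from devHistogram
                    const uint8_t* hostCharImage,  // RGB uchar image copied back from devCharImage
                    float* hostOutputImageData,    // float image handed to wbSolution
                    int width, int height, int channels)
{
    const int numPixels = width * height;
    float cdf[HISTOGRAM_LENGTH];
    float cumulative = 0.0f;
    for (int i = 0; i < HISTOGRAM_LENGTH; ++i) {
        cumulative += static_cast<float>(hostHistogram[i]) / numPixels;
        cdf[i] = cumulative;   // cumulative distribution of grayscale levels
    }
    const float cdfMin = cdf[0];
    for (int i = 0; i < numPixels * channels; ++i) {
        float corrected = 255.0f * (cdf[hostCharImage[i]] - cdfMin) / (1.0f - cdfMin);
        corrected = std::min(std::max(corrected, 0.0f), 255.0f);
        hostOutputImageData[i] = corrected / 255.0f;  // back to the [0,1] float range wbImage uses
    }
}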
af1685c716292a6d7dc198104a9f6420262f9f73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2015-2017 Illia Olenchenko #include <iostream> #include <math.h> #include "vector" #include "../lib/alglib/src/ap.h" #include "../lib/alglib/src/alglibmisc.h" #include "../lib/alglib/src/alglibinternal.h" #include "../lib/alglib/src/linalg.h" #include "../lib/alglib/src/statistics.h" #include "../lib/alglib/src/dataanalysis.h" #include "../lib/alglib/src/specialfunctions.h" #include "../lib/alglib/src/solvers.h" #include "../lib/alglib/src/optimization.h" #include "../lib/alglib/src/diffequations.h" #include "../lib/alglib/src/fasttransforms.h" #include "../lib/alglib/src/integration.h" #include "../lib/alglib/src/interpolation.h" #include "../utils/out.h" #include "../utils/functions.h" #include "../utils/init.h" #include "../utils/transform.h" #include "../utils/upper_relaxation.h" #include "../utils/tools.h" #include <string> #include <ctime> #include <mkl.h> using namespace std; using namespace alglib; using namespace alglib_impl; /** * CUDA functions */ #ifndef N #define N 4 #endif #ifndef GPU #define GPU 2 #endif __device__ int blocks = N - 2; __global__ void mykernel(double *rightSide, double wOpt, double *fa, double *diff, int n, double *all, int i, int j, int litN) { // tempAppr[j] = (B[j] - aMulX(A, firstAppr, j)) * Tau[i - 1] + firstAppr[j]; int index = threadIdx.x; //+ blockIdx.x * blockDim.x; int row = (i - j + 1) / 2; int index1 = row * litN + (((j - i) == 1)? j : litN) - 1 + index * (litN - 1); int lindex = index1 + N + 1 + 2 * (int) (index1 / (N - 2)); if (index1 < n) { // fa[index1] = index1; fa[index1] = (-rightSide[index1] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1] - 4 * (1 - 1. / wOpt) * all[lindex]) * wOpt / 4.; diff[index1] = fa[index1] - all[lindex]; all[lindex] = fa[index1]; } } __global__ void my_red_black_kernel(double *rightSide, double wOpt, double *fa, double *diff, int n, double *all, int litN, int first) { // tempAppr[j] = (B[j] - aMulX(A, firstAppr, j)) * Tau[i - 1] + firstAppr[j]; int index = threadIdx.x + blockIdx.x * blockDim.x; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)); int row = (int)(index / litN); int str = index % litN; if (index < n && ((row + str) % 2 == first)) { // fa[index] = index; fa[index] = (-rightSide[index] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1] - 4 * (1 - 1. 
/ wOpt) * all[lindex]) * wOpt / 4.; diff[index] = fa[index] - all[lindex]; all[lindex] = fa[index]; } } int main() { vector<int> cudas(GPU); for (size_t i = 0; i < GPU; i++) { cudas[i] = i; } int n_inner = (N - 2) * (N - 2); int n_splitted_inner = n_inner / GPU; double t0 = dsecnd(); /* *TODO: add CUDA improvements *this part of the program runs the successive over-relaxation (SOR) iterative process for the given matrix and right-hand side */ /* * N means matr size * A means main Matr * B means right vector */ /* * Getting inputs A and B */ vector<vector<double> > A((N - 2) * (N - 2), vector<double>((N - 2) * (N - 2), 0)); readMatr(A); vector<vector<double> > B(N, vector<double>(N, 0)); vector<vector<double> > firstAppr(N, vector<double>(N, 0)); vector<vector<double> > changeAppr(N, vector<double>(N, 0)); firstApprSet(firstAppr); readVector(B); double eps = 0.001; double spectr; double wOpt; double maxDiff = 0; alglib::real_2d_array matrix; matrix.setcontent((N - 2) * (N - 2), (N - 2) * (N - 2), arrToRealArr(A)); /* *creating another parts *wr - real parts of the eigenvalues *wi - imaginary parts of the eigenvalues *vl - left eigenvector *vr - right eigenvector */ alglib::real_1d_array wr; alglib::real_2d_array vl; /* * eigenvalue computation */ alglib::smatrixevd(matrix, N - 2, 0, true, wr, vl); /* * assume the spectral radius of the matrix is its largest eigenvalue (all of which should be valid) without taking the absolute value, since they should all be positive */ spectr = findMaxRealArr(wr); wOpt = wOptSet(A, spectr, 1. / N); /* *main loop here *if I understood correctly, the newly computed values have to be used immediately; I proceed from that assumption */ // just iterator int k = 0; // char aber; // allocate pointers for host arrays double **rightSide = new double*[(int)GPU]; double **fa = new double*[(int)GPU]; double *all = new double[N * N]; double **diff = new double*[(int)GPU]; for (size_t i = 0; i < GPU; i++) { rightSide[i] = new double[(int)n_splitted_inner]; diff[i] = new double[(int)n_splitted_inner]; fa[i] = new double[(int)n_splitted_inner]; } // allocate pointers for dev arrays double *d_rs[GPU], *d_fa[GPU], *d_all[GPU], *d_diff[GPU]; int size = sizeof(double); // init memcpy from dev to dev for (size_t i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); hipDeviceEnablePeerAccess(cudas[i], 0); } // malloc dev arrays for (size_t i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); hipMalloc((void **)&d_rs[i], size * (n_splitted_inner)); hipMalloc((void **)&d_all[i], size * (N * N)); hipMalloc((void **)&d_fa[i], size * (n_splitted_inner)); hipMalloc((void **)&d_diff[i], size * (n_splitted_inner)); } // fill host arrays for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { rightSide[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = B[j + plus][k]; fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = firstAppr[j + plus][k]; diff[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = 1; } } } for (int j = 0; j < N ; j++) { for (int k = 0; k < N; k++) { all[j * N + k] = firstAppr[j][k]; } } outMatr(B); outMatr(firstAppr); for (size_t i = 0; i < GPU; i++) { outVector(rightSide[i], n_splitted_inner); outVector(fa[i], n_splitted_inner); outVector(diff[i], n_splitted_inner); } outVector(all, N * N); // hipMemcpy(d_rs, rightSide, size * (N * N - 4 * N + 4), hipMemcpyHostToDevice); // hipMemcpy(d_fa, fa, size * (N * N - 4 * N + 4), hipMemcpyHostToDevice); // hipMemcpy(d_all, all, size * (N * N), hipMemcpyHostToDevice); // hipMemcpy(d_diff, diff, size * (N * N - 4 * N + 4), hipMemcpyHostToDevice); // double timeChecker = dsecnd(); // int n = N - 2; // for (int k = n; k > 0; --k) { // for (int j = 0, i = 0; i < 2 * k - 1; i++) { // if (i < n) { // ++j; // } else { // --j; // } // mykernel<<<1, j>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, i, j, n); // } // } // hipMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // outVector(fa, N * N - 4 * N + 4); // // cin>>aber; // hipMemcpy(diff, d_diff, size * (N
* N - 4 * N + 4), hipMemcpyDeviceToHost); // do { // // outVector(diff, N * N - 4 * N + 4); // // diff[0] = 0; // // hipMemcpy(d_diff, diff, size, hipMemcpyHostToDevice); // cout << "The " << k << " iter" << endl; // // copyVectors(firstAppr, changeAppr); // // cout<<"change: "<<endl; // // outMatr(changeAppr); // // cout<<"fa: "<<endl; // // outMatr(firstAppr); // // cin>>aber; // // for (int i = 0; i < A.size(); i++) { // // firstAppr[i] = firstAppr[i] + (B[i] - aMulX(A, firstAppr, i)) * wOpt / (DwL(A, i, wOpt)); // // } // // for (int j = 1; j < N - 1; ++j) { // // for (int i = 1; i < N - 1; i++) { // // firstAppr[j][i] = (B[j][i] - (firstAppr[j][i + 1] + firstAppr[j][i - 1] + // // firstAppr[j + 1][i] + firstAppr[j - 1][i] - 4 * firstAppr[j][i])) * wOpt / (DwL(A, i, wOpt)); + firstAppr[j][i]; // my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, n, 1); // // hipMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // // cout<<"Halfstep"<<endl; // // outVector(fa, N * N - 4 * N + 4); // my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, n, 0); // // for (int j = 0,i = 0; i < 2 * n - 1; i++) { // // if (i < n) { // // ++j; // // } else { // // --j; // // } // // mykernel<<<1, j>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, i, j, n); // // // hipMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // // // cout<<"The fa is "<<endl; // // // outVector(fa, N * N - 4 * N + 4); // // // cin>>aber; // // } // // firstAppr[j][i] = (-B[j][i] + firstAppr[j + 1][i] + firstAppr[j - 1][i] + firstAppr[j][i - 1] + // // firstAppr[j][i + 1] - 4 * (1 - 1. / wOpt) * firstAppr[j][i]) * wOpt / 4.; // // } // // } // // for (int j = 1; j < N - 1; ++j) { // // for (int i = 1; i < N - 1; i++) { // // changeAppr[j][i] = fabs(firstAppr[j][i] - changeAppr[j][i]); // // } // // } // // hipMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // hipMemcpy(diff, d_diff, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // // cout<<"The fa is "<<endl; // // outVector(fa, N * N - 4 * N + 4); // // cout<<endl; // // outVector(diff, N * N - 4 * N + 4); // maxDiff = findMaxInVector(diff, N * N - 4 * N + 4); // // outVector(changeAppr); // // cout<<findMaxInVector(changeAppr)<<endl; // // maxDiff = findMaxInVector(changeAppr); // // system("pause"); // ++k; // cout << "Maxdiff is " << maxDiff << endl; // // cin>>aber; // if (maxDiff > 1000) { // break; // } // } while (maxDiff > eps); // timeChecker = dsecnd() - timeChecker; // cout << "The iter is:" << endl; // cout << k << endl; // firstApprSet(changeAppr); // hipMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), hipMemcpyDeviceToHost); // for (int j = 1; j < N - 1; j++) { // for (int k = 1; k < N - 1; k++) { // firstAppr[j][k] = fa[(j - 1) * (N - 2) + (k - 1)]; // } // } // hipFree(d_rs); // hipFree(d_fa); // // hipFree(d_c); // hipFree(d_all); // hipFree(d_diff); // /* // * outing // */ // cout << "The Matr Is:" << endl; // outMatr(A); // cout << "The Vector Is:" << endl; // outMatr(B); // cout << "The first approximation Is:" << endl; // outMatr(changeAppr); // cout << "The epsilon Is:" << endl; // cout << eps << endl; // cout << "The Vector of ownValues:" << endl; // outReal1Array(wr); // cout << "The Spectr Is:" << endl; // cout << spectr << endl; // cout << "The wOpt Is:" << endl; // cout << wOpt << endl; // cout << "The result Is:" << endl; // outMatr(firstAppr); // cout << "The time is:" << endl; // cout << dsecnd() - t0 << " s" << 
endl; // cout << "The time of main is:" << endl; // cout << timeChecker << " s" << endl; // cout << "The 1 1 is:" << endl; // cout << firstAppr[1][1] << endl; // cout << "The 2 2 is:" << endl; // cout << firstAppr[2][2] << endl; // cout << "The N - 2 N - 2 is:" << endl; // cout << firstAppr[firstAppr.size() - 2][firstAppr.size() - 2] << endl; // cout << "The N - 3 N - 3 is:" << endl; // cout << firstAppr[firstAppr.size() - 3][firstAppr.size() - 3] << endl; return 0; }
af1685c716292a6d7dc198104a9f6420262f9f73.cu
// Copyright 2015-2017 Illia Olenchenko #include <iostream> #include <math.h> #include "vector" #include "../lib/alglib/src/ap.h" #include "../lib/alglib/src/alglibmisc.h" #include "../lib/alglib/src/alglibinternal.h" #include "../lib/alglib/src/linalg.h" #include "../lib/alglib/src/statistics.h" #include "../lib/alglib/src/dataanalysis.h" #include "../lib/alglib/src/specialfunctions.h" #include "../lib/alglib/src/solvers.h" #include "../lib/alglib/src/optimization.h" #include "../lib/alglib/src/diffequations.h" #include "../lib/alglib/src/fasttransforms.h" #include "../lib/alglib/src/integration.h" #include "../lib/alglib/src/interpolation.h" #include "../utils/out.h" #include "../utils/functions.h" #include "../utils/init.h" #include "../utils/transform.h" #include "../utils/upper_relaxation.h" #include "../utils/tools.h" #include <string> #include <ctime> #include <mkl.h> using namespace std; using namespace alglib; using namespace alglib_impl; /** * CUDA functions */ #ifndef N #define N 4 #endif #ifndef GPU #define GPU 2 #endif __device__ int blocks = N - 2; __global__ void mykernel(double *rightSide, double wOpt, double *fa, double *diff, int n, double *all, int i, int j, int litN) { // tempAppr[j] = (B[j] - aMulX(A, firstAppr, j)) * Tau[i - 1] + firstAppr[j]; int index = threadIdx.x; //+ blockIdx.x * blockDim.x; int row = (i - j + 1) / 2; int index1 = row * litN + (((j - i) == 1)? j : litN) - 1 + index * (litN - 1); int lindex = index1 + N + 1 + 2 * (int) (index1 / (N - 2)); if (index1 < n) { // fa[index1] = index1; fa[index1] = (-rightSide[index1] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1] - 4 * (1 - 1. / wOpt) * all[lindex]) * wOpt / 4.; diff[index1] = fa[index1] - all[lindex]; all[lindex] = fa[index1]; } } __global__ void my_red_black_kernel(double *rightSide, double wOpt, double *fa, double *diff, int n, double *all, int litN, int first) { // tempAppr[j] = (B[j] - aMulX(A, firstAppr, j)) * Tau[i - 1] + firstAppr[j]; int index = threadIdx.x + blockIdx.x * blockDim.x; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)); int row = (int)(index / litN); int str = index % litN; if (index < n && ((row + str) % 2 == first)) { // fa[index] = index; fa[index] = (-rightSide[index] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1] - 4 * (1 - 1. 
/ wOpt) * all[lindex]) * wOpt / 4.; diff[index] = fa[index] - all[lindex]; all[lindex] = fa[index]; } } int main() { vector<int> cudas(GPU); for (size_t i = 0; i < GPU; i++) { cudas[i] = i; } int n_inner = (N - 2) * (N - 2); int n_splitted_inner = n_inner / GPU; double t0 = dsecnd(); /* *TODO: add CUDA improvements *this part of the program runs the successive over-relaxation (SOR) iterative process for the given matrix and right-hand side */ /* * N means matr size * A means main Matr * B means right vector */ /* * Getting inputs A and B */ vector<vector<double> > A((N - 2) * (N - 2), vector<double>((N - 2) * (N - 2), 0)); readMatr(A); vector<vector<double> > B(N, vector<double>(N, 0)); vector<vector<double> > firstAppr(N, vector<double>(N, 0)); vector<vector<double> > changeAppr(N, vector<double>(N, 0)); firstApprSet(firstAppr); readVector(B); double eps = 0.001; double spectr; double wOpt; double maxDiff = 0; alglib::real_2d_array matrix; matrix.setcontent((N - 2) * (N - 2), (N - 2) * (N - 2), arrToRealArr(A)); /* *creating another parts *wr - real parts of the eigenvalues *wi - imaginary parts of the eigenvalues *vl - left eigenvector *vr - right eigenvector */ alglib::real_1d_array wr; alglib::real_2d_array vl; /* * eigenvalue computation */ alglib::smatrixevd(matrix, N - 2, 0, true, wr, vl); /* *assume the spectral radius of the matrix is its largest eigenvalue (all of which should be valid) without taking the absolute value, since they should all be positive */ spectr = findMaxRealArr(wr); wOpt = wOptSet(A, spectr, 1. / N); /* *main loop here *if I understood correctly, the newly computed values have to be used immediately; I proceed from that assumption */ // just iterator int k = 0; // char aber; // allocate pointers for host arrays double **rightSide = new double*[(int)GPU]; double **fa = new double*[(int)GPU]; double *all = new double[N * N]; double **diff = new double*[(int)GPU]; for (size_t i = 0; i < GPU; i++) { rightSide[i] = new double[(int)n_splitted_inner]; diff[i] = new double[(int)n_splitted_inner]; fa[i] = new double[(int)n_splitted_inner]; } // allocate pointers for dev arrays double *d_rs[GPU], *d_fa[GPU], *d_all[GPU], *d_diff[GPU]; int size = sizeof(double); // init memcpy from dev to dev for (size_t i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); cudaDeviceEnablePeerAccess(cudas[i], 0); } // malloc dev arrays for (size_t i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); cudaMalloc((void **)&d_rs[i], size * (n_splitted_inner)); cudaMalloc((void **)&d_all[i], size * (N * N)); cudaMalloc((void **)&d_fa[i], size * (n_splitted_inner)); cudaMalloc((void **)&d_diff[i], size * (n_splitted_inner)); } // fill host arrays for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { rightSide[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = B[j + plus][k]; fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = firstAppr[j + plus][k]; diff[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = 1; } } } for (int j = 0; j < N ; j++) { for (int k = 0; k < N; k++) { all[j * N + k] = firstAppr[j][k]; } } outMatr(B); outMatr(firstAppr); for (size_t i = 0; i < GPU; i++) { outVector(rightSide[i], n_splitted_inner); outVector(fa[i], n_splitted_inner); outVector(diff[i], n_splitted_inner); } outVector(all, N * N); // cudaMemcpy(d_rs, rightSide, size * (N * N - 4 * N + 4), cudaMemcpyHostToDevice); // cudaMemcpy(d_fa, fa, size * (N * N - 4 * N + 4), cudaMemcpyHostToDevice); // cudaMemcpy(d_all, all, size * (N * N), cudaMemcpyHostToDevice); // cudaMemcpy(d_diff, diff, size * (N * N -
4 * N + 4), cudaMemcpyHostToDevice); // double timeChecker = dsecnd(); // int n = N - 2; // for (int k = n; k > 0; --k) { // for (int j = 0, i = 0; i < 2 * k - 1; i++) { // if (i < n) { // ++j; // } else { // --j; // } // mykernel<<<1, j>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, i, j, n); // } // } // cudaMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // outVector(fa, N * N - 4 * N + 4); // // cin>>aber; // cudaMemcpy(diff, d_diff, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // do { // // outVector(diff, N * N - 4 * N + 4); // // diff[0] = 0; // // cudaMemcpy(d_diff, diff, size, cudaMemcpyHostToDevice); // cout << "The " << k << " iter" << endl; // // copyVectors(firstAppr, changeAppr); // // cout<<"change: "<<endl; // // outMatr(changeAppr); // // cout<<"fa: "<<endl; // // outMatr(firstAppr); // // cin>>aber; // // for (int i = 0; i < A.size(); i++) { // // firstAppr[i] = firstAppr[i] + (B[i] - aMulX(A, firstAppr, i)) * wOpt / (DwL(A, i, wOpt)); // // } // // for (int j = 1; j < N - 1; ++j) { // // for (int i = 1; i < N - 1; i++) { // // firstAppr[j][i] = (B[j][i] - (firstAppr[j][i + 1] + firstAppr[j][i - 1] + // // firstAppr[j + 1][i] + firstAppr[j - 1][i] - 4 * firstAppr[j][i])) * wOpt / (DwL(A, i, wOpt)); + firstAppr[j][i]; // my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, n, 1); // // cudaMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // // cout<<"Halfstep"<<endl; // // outVector(fa, N * N - 4 * N + 4); // my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, n, 0); // // for (int j = 0,i = 0; i < 2 * n - 1; i++) { // // if (i < n) { // // ++j; // // } else { // // --j; // // } // // mykernel<<<1, j>>>(d_rs, wOpt, d_fa, d_diff, N * N - 4 * N + 4, d_all, i, j, n); // // // cudaMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // // // cout<<"The fa is "<<endl; // // // outVector(fa, N * N - 4 * N + 4); // // // cin>>aber; // // } // // firstAppr[j][i] = (-B[j][i] + firstAppr[j + 1][i] + firstAppr[j - 1][i] + firstAppr[j][i - 1] + // // firstAppr[j][i + 1] - 4 * (1 - 1. 
/ wOpt) * firstAppr[j][i]) * wOpt / 4.; // // } // // } // // for (int j = 1; j < N - 1; ++j) { // // for (int i = 1; i < N - 1; i++) { // // changeAppr[j][i] = fabs(firstAppr[j][i] - changeAppr[j][i]); // // } // // } // // cudaMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // cudaMemcpy(diff, d_diff, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // // cout<<"The fa is "<<endl; // // outVector(fa, N * N - 4 * N + 4); // // cout<<endl; // // outVector(diff, N * N - 4 * N + 4); // maxDiff = findMaxInVector(diff, N * N - 4 * N + 4); // // outVector(changeAppr); // // cout<<findMaxInVector(changeAppr)<<endl; // // maxDiff = findMaxInVector(changeAppr); // // system("pause"); // ++k; // cout << "Maxdiff is " << maxDiff << endl; // // cin>>aber; // if (maxDiff > 1000) { // break; // } // } while (maxDiff > eps); // timeChecker = dsecnd() - timeChecker; // cout << "The iter is:" << endl; // cout << k << endl; // firstApprSet(changeAppr); // cudaMemcpy(fa, d_fa, size * (N * N - 4 * N + 4), cudaMemcpyDeviceToHost); // for (int j = 1; j < N - 1; j++) { // for (int k = 1; k < N - 1; k++) { // firstAppr[j][k] = fa[(j - 1) * (N - 2) + (k - 1)]; // } // } // cudaFree(d_rs); // cudaFree(d_fa); // // cudaFree(d_c); // cudaFree(d_all); // cudaFree(d_diff); // /* // * outing // */ // cout << "The Matr Is:" << endl; // outMatr(A); // cout << "The Vector Is:" << endl; // outMatr(B); // cout << "The first approximation Is:" << endl; // outMatr(changeAppr); // cout << "The epsilon Is:" << endl; // cout << eps << endl; // cout << "The Vector of ownValues:" << endl; // outReal1Array(wr); // cout << "The Spectr Is:" << endl; // cout << spectr << endl; // cout << "The wOpt Is:" << endl; // cout << wOpt << endl; // cout << "The result Is:" << endl; // outMatr(firstAppr); // cout << "The time is:" << endl; // cout << dsecnd() - t0 << " s" << endl; // cout << "The time of main is:" << endl; // cout << timeChecker << " s" << endl; // cout << "The 1 1 is:" << endl; // cout << firstAppr[1][1] << endl; // cout << "The 2 2 is:" << endl; // cout << firstAppr[2][2] << endl; // cout << "The N - 2 N - 2 is:" << endl; // cout << firstAppr[firstAppr.size() - 2][firstAppr.size() - 2] << endl; // cout << "The N - 3 N - 3 is:" << endl; // cout << firstAppr[firstAppr.size() - 3][firstAppr.size() - 3] << endl; return 0; }
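The iteration driver in both halves of this pair is entirely commented out. The helper below is a minimal single-GPU sketch reconstructed from that commented-out loop: the kernel name, its argument order, and the (N - 2) by (N - 2) launch shape follow the file, while runRedBlackSOR itself and the host-side diff buffer are assumptions.

// Hypothetical single-GPU driver for my_red_black_kernel (a sketch based on the commented-out loop above).
#include <algorithm>
#include <cmath>
#include <vector>

void runRedBlackSOR(double* d_rs, double* d_fa, double* d_diff, double* d_all,
                    double wOpt, double eps)
{
    const int nInner = (N - 2) * (N - 2);        // number of interior grid points
    std::vector<double> hostDiff(nInner, 1.0);
    double maxDiff;
    do {
        // red sweep, then black sweep: each thread updates one interior point of its colour
        my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, nInner, d_all, N - 2, 1);
        my_red_black_kernel<<<N - 2, N - 2>>>(d_rs, wOpt, d_fa, d_diff, nInner, d_all, N - 2, 0);
        cudaMemcpy(hostDiff.data(), d_diff, nInner * sizeof(double), cudaMemcpyDeviceToHost);
        maxDiff = 0.0;
        for (double d : hostDiff) {
            maxDiff = std::max(maxDiff, std::fabs(d));   // convergence measured by the largest update
        }
    } while (maxDiff > eps);
}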
a61cc54c7986cfc6a371cb7f4d7641b50bf630f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <cstdlib> #include <memory> #include <Eigen/Core> #include <Eigen/Dense> #include "DataFormats/SoATemplate/interface/SoALayout.h" #include "DataFormats/SoATemplate/interface/SoAView.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" // Test SoA stores and view. // Use cases // Multiple stores in a buffer // Scalars, Columns of scalars and of Eigen vectors // View to each of them, from one and multiple stores. GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate, /*SoAHostDeviceViewTemplate,*/ // predefined static scalars // size_t size; // size_t alignment; // columns: one value per element SOA_COLUMN(double, x), SOA_COLUMN(double, y), SOA_COLUMN(double, z), SOA_EIGEN_COLUMN(Eigen::Vector3d, a), SOA_EIGEN_COLUMN(Eigen::Vector3d, b), SOA_EIGEN_COLUMN(Eigen::Vector3d, r), // scalars: one value for the whole structure SOA_SCALAR(const char*, description), SOA_SCALAR(uint32_t, someNumber)) using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>; using SoAHostDeviceView = SoAHostDeviceLayout::View; using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView; GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate, /*SoADeviceOnlyViewTemplate,*/ SOA_COLUMN(uint16_t, color), SOA_COLUMN(double, value), SOA_COLUMN(double*, py), SOA_COLUMN(uint32_t, count), SOA_COLUMN(uint32_t, anotherCount)) using SoADeviceOnlyLayout = SoADeviceOnlyLayoutTemplate<>; using SoADeviceOnlyView = SoADeviceOnlyLayout::View; // A 1 to 1 view of the store (except for unsupported types). GENERATE_SOA_VIEW(SoAFullDeviceConstViewTemplate, SoAFullDeviceViewTemplate, SOA_VIEW_LAYOUT_LIST(SOA_VIEW_LAYOUT(SoAHostDeviceLayout, soaHD), SOA_VIEW_LAYOUT(SoADeviceOnlyLayout, soaDO)), SOA_VIEW_VALUE_LIST(SOA_VIEW_VALUE(soaHD, x), SOA_VIEW_VALUE(soaHD, y), SOA_VIEW_VALUE(soaHD, z), SOA_VIEW_VALUE(soaDO, color), SOA_VIEW_VALUE(soaDO, value), SOA_VIEW_VALUE(soaDO, py), SOA_VIEW_VALUE(soaDO, count), SOA_VIEW_VALUE(soaDO, anotherCount), SOA_VIEW_VALUE(soaHD, description), SOA_VIEW_VALUE(soaHD, someNumber))) using SoAFullDeviceView = SoAFullDeviceViewTemplate<cms::soa::CacheLineSize::NvidiaGPU, cms::soa::AlignmentEnforcement::enforced>; // These SoAs validate that the generating macros do not get confused in the special case where there are // no columns and only scalar elements in the SoA. 
GENERATE_SOA_LAYOUT(TestSoALayoutNoColumn, SOA_SCALAR(double, r)) GENERATE_SOA_LAYOUT(TestSoALayoutNoColumn2, SOA_SCALAR(double, r), SOA_SCALAR(double, r2)) // Eigen cross product kernel (on store) __global__ void crossProduct(SoAHostDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.r() = si.a().cross(si.b()); } // Device-only producer kernel __global__ void producerKernel(SoAFullDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.color() &= 0x55 << i % (sizeof(si.color()) - sizeof(char)); si.value() = sqrt(si.x() * si.x() + si.y() * si.y() + si.z() * si.z()); } // Device-only consumer with result in host-device area __global__ void consumerKernel(SoAFullDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.x() = si.color() * si.value(); } // Get a view like the default, except for range checking using RangeCheckingHostDeviceView = SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>; // We expect to just run one thread. __global__ void rangeCheckKernel(RangeCheckingHostDeviceView soa) { printf("About to fail range-check (operator[]) in CUDA thread: %d\n", threadIdx.x); [[maybe_unused]] auto si = soa[soa.metadata().size()]; printf("Fail: range-check failure should have stopped the kernel.\n"); } int main(void) { cms::cudatest::requireDevices(); hipStream_t stream; cudaCheck(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); // Non-aligned number of elements to check alignment features. constexpr unsigned int numElements = 65537; // Allocate buffer and store on host size_t hostDeviceSize = SoAHostDeviceLayout::computeDataSize(numElements); std::byte* h_buf = nullptr; cudaCheck(hipHostMalloc(&h_buf, hostDeviceSize)); SoAHostDeviceLayout h_soahdLayout(h_buf, numElements); SoAHostDeviceView h_soahd(h_soahdLayout); SoAHostDeviceConstView h_soahd_c(h_soahdLayout); // Alocate buffer, stores and views on the device (single, shared buffer). 
size_t deviceOnlySize = SoADeviceOnlyLayout::computeDataSize(numElements); std::byte* d_buf = nullptr; cudaCheck(hipHostMalloc(&d_buf, hostDeviceSize + deviceOnlySize)); SoAHostDeviceLayout d_soahdLayout(d_buf, numElements); SoADeviceOnlyLayout d_soadoLayout(d_soahdLayout.metadata().nextByte(), numElements); SoAHostDeviceView d_soahdView(d_soahdLayout); SoAFullDeviceView d_soaFullView(d_soahdLayout, d_soadoLayout); // Assert column alignments assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_x()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_y()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_z()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_a()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_b()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_r()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_description()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_someNumber()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_x()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_y()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_z()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_a()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_b()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_r()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_description()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_someNumber()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_color()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_value()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_py()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_count()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_anotherCount()) % decltype(d_soadoLayout)::alignment); // Views should get the same alignment as the stores they refer to assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_x()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_y()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_z()) % decltype(d_soaFullView)::alignment); // Limitation of views: we have to get scalar member addresses via metadata. 
assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_description()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_someNumber()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_color()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_value()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_py()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_count()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_anotherCount()) % decltype(d_soaFullView)::alignment); // Initialize and fill the host buffer std::memset(h_soahdLayout.metadata().data(), 0, hostDeviceSize); for (size_t i = 0; i < numElements; ++i) { auto si = h_soahd[i]; // Tuple assignment... // elements are: x, y, z, a, b, r auto v1 = 1.0 * i + 1.0; auto v2 = 2.0 * i; auto v3 = 3.0 * i - 1.0; if (i % 2) { si = {v1, v2, v3, {v1, v2, v3}, {v3, v2, v1}, {0, 0, 0}}; } else { si.x() = si.a()(0) = si.b()(2) = v1; si.y() = si.a()(1) = si.b()(1) = v2; si.z() = si.a()(2) = si.b()(0) = v3; } } auto& sn = h_soahd.someNumber(); sn = numElements + 2; // Push to device cudaCheck(hipMemcpyAsync(d_buf, h_buf, hostDeviceSize, hipMemcpyDefault, stream)); // Process on device hipLaunchKernelGGL(( crossProduct), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soahdView, numElements); // Paint the device only with 0xFF initially cudaCheck(hipMemsetAsync(d_soadoLayout.metadata().data(), 0xFF, d_soadoLayout.metadata().byteSize(), stream)); // Produce to the device only area hipLaunchKernelGGL(( producerKernel), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soaFullView, numElements); // Consume the device only area and generate a result on the host-device area hipLaunchKernelGGL(( consumerKernel), dim3((numElements + 255) / 256), dim3(256), 0, stream, d_soaFullView, numElements); // Get result back cudaCheck(hipMemcpyAsync(h_buf, d_buf, hostDeviceSize, hipMemcpyDefault, stream)); // Wait and validate. 
cudaCheck(hipStreamSynchronize(stream)); for (size_t i = 0; i < numElements; ++i) { auto si = h_soahd_c[i]; assert(si.r() == si.a().cross(si.b())); double initialX = 1.0 * i + 1.0; double initialY = 2.0 * i; double initialZ = 3.0 * i - 1.0; uint16_t expectedColor = 0x55 << i % (sizeof(uint16_t) - sizeof(char)); double expectedX = expectedColor * sqrt(initialX * initialX + initialY * initialY + initialZ * initialZ); if (abs(si.x() - expectedX) / expectedX >= 2 * std::numeric_limits<double>::epsilon()) { std::cout << "X failed: for i=" << i << std::endl << "initialX=" << initialX << " initialY=" << initialY << " initialZ=" << initialZ << std::endl << "expectedX=" << expectedX << std::endl << "resultX=" << si.x() << " resultY=" << si.y() << " resultZ=" << si.z() << std::endl << "relativeDiff=" << abs(si.x() - expectedX) / expectedX << " epsilon=" << std::numeric_limits<double>::epsilon() << std::endl; assert(false); } } // Validation of range checking try { // Get a view like the default, except for range checking SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled> soa1viewRangeChecking(h_soahdLayout); // This should throw an exception [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl; assert(false); } catch (const std::out_of_range&) { std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl; } try { // Get a view like the default, except for range checking SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled> soa1viewRangeChecking(h_soahdLayout); // This should throw an exception [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl; assert(false); } catch (const std::out_of_range&) { std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host." << std::endl; } // Validation of range checking in a kernel // Get a view like the default one, except for range checking RangeCheckingHostDeviceView soa1viewRangeChecking(d_soahdLayout); // This should throw an exception in the kernel hipLaunchKernelGGL(( rangeCheckKernel), dim3(1), dim3(1), 0, stream, soa1viewRangeChecking); // Wait and confirm that the CUDA kernel failed try { cudaCheck(hipStreamSynchronize(stream)); std::cout << "Fail: expected range-check exception not caught while executing the kernel." << std::endl; assert(false); } catch (const std::runtime_error&) { std::cout << "Pass: expected range-check exception caught while executing the kernel." << std::endl; } std::cout << "OK" << std::endl; }
a61cc54c7986cfc6a371cb7f4d7641b50bf630f9.cu
#include <cassert> #include <cstdlib> #include <memory> #include <Eigen/Core> #include <Eigen/Dense> #include "DataFormats/SoATemplate/interface/SoALayout.h" #include "DataFormats/SoATemplate/interface/SoAView.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" // Test SoA stores and view. // Use cases // Multiple stores in a buffer // Scalars, Columns of scalars and of Eigen vectors // View to each of them, from one and multiple stores. GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate, /*SoAHostDeviceViewTemplate,*/ // predefined static scalars // size_t size; // size_t alignment; // columns: one value per element SOA_COLUMN(double, x), SOA_COLUMN(double, y), SOA_COLUMN(double, z), SOA_EIGEN_COLUMN(Eigen::Vector3d, a), SOA_EIGEN_COLUMN(Eigen::Vector3d, b), SOA_EIGEN_COLUMN(Eigen::Vector3d, r), // scalars: one value for the whole structure SOA_SCALAR(const char*, description), SOA_SCALAR(uint32_t, someNumber)) using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>; using SoAHostDeviceView = SoAHostDeviceLayout::View; using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView; GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate, /*SoADeviceOnlyViewTemplate,*/ SOA_COLUMN(uint16_t, color), SOA_COLUMN(double, value), SOA_COLUMN(double*, py), SOA_COLUMN(uint32_t, count), SOA_COLUMN(uint32_t, anotherCount)) using SoADeviceOnlyLayout = SoADeviceOnlyLayoutTemplate<>; using SoADeviceOnlyView = SoADeviceOnlyLayout::View; // A 1 to 1 view of the store (except for unsupported types). GENERATE_SOA_VIEW(SoAFullDeviceConstViewTemplate, SoAFullDeviceViewTemplate, SOA_VIEW_LAYOUT_LIST(SOA_VIEW_LAYOUT(SoAHostDeviceLayout, soaHD), SOA_VIEW_LAYOUT(SoADeviceOnlyLayout, soaDO)), SOA_VIEW_VALUE_LIST(SOA_VIEW_VALUE(soaHD, x), SOA_VIEW_VALUE(soaHD, y), SOA_VIEW_VALUE(soaHD, z), SOA_VIEW_VALUE(soaDO, color), SOA_VIEW_VALUE(soaDO, value), SOA_VIEW_VALUE(soaDO, py), SOA_VIEW_VALUE(soaDO, count), SOA_VIEW_VALUE(soaDO, anotherCount), SOA_VIEW_VALUE(soaHD, description), SOA_VIEW_VALUE(soaHD, someNumber))) using SoAFullDeviceView = SoAFullDeviceViewTemplate<cms::soa::CacheLineSize::NvidiaGPU, cms::soa::AlignmentEnforcement::enforced>; // These SoAs validate that the generating macros do not get confused in the special case where there are // no columns and only scalar elements in the SoA. 
GENERATE_SOA_LAYOUT(TestSoALayoutNoColumn, SOA_SCALAR(double, r)) GENERATE_SOA_LAYOUT(TestSoALayoutNoColumn2, SOA_SCALAR(double, r), SOA_SCALAR(double, r2)) // Eigen cross product kernel (on store) __global__ void crossProduct(SoAHostDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.r() = si.a().cross(si.b()); } // Device-only producer kernel __global__ void producerKernel(SoAFullDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.color() &= 0x55 << i % (sizeof(si.color()) - sizeof(char)); si.value() = sqrt(si.x() * si.x() + si.y() * si.y() + si.z() * si.z()); } // Device-only consumer with result in host-device area __global__ void consumerKernel(SoAFullDeviceView soa, const unsigned int numElements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= numElements) return; auto si = soa[i]; si.x() = si.color() * si.value(); } // Get a view like the default, except for range checking using RangeCheckingHostDeviceView = SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled>; // We expect to just run one thread. __global__ void rangeCheckKernel(RangeCheckingHostDeviceView soa) { printf("About to fail range-check (operator[]) in CUDA thread: %d\n", threadIdx.x); [[maybe_unused]] auto si = soa[soa.metadata().size()]; printf("Fail: range-check failure should have stopped the kernel.\n"); } int main(void) { cms::cudatest::requireDevices(); cudaStream_t stream; cudaCheck(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); // Non-aligned number of elements to check alignment features. constexpr unsigned int numElements = 65537; // Allocate buffer and store on host size_t hostDeviceSize = SoAHostDeviceLayout::computeDataSize(numElements); std::byte* h_buf = nullptr; cudaCheck(cudaMallocHost(&h_buf, hostDeviceSize)); SoAHostDeviceLayout h_soahdLayout(h_buf, numElements); SoAHostDeviceView h_soahd(h_soahdLayout); SoAHostDeviceConstView h_soahd_c(h_soahdLayout); // Alocate buffer, stores and views on the device (single, shared buffer). 
size_t deviceOnlySize = SoADeviceOnlyLayout::computeDataSize(numElements); std::byte* d_buf = nullptr; cudaCheck(cudaMallocHost(&d_buf, hostDeviceSize + deviceOnlySize)); SoAHostDeviceLayout d_soahdLayout(d_buf, numElements); SoADeviceOnlyLayout d_soadoLayout(d_soahdLayout.metadata().nextByte(), numElements); SoAHostDeviceView d_soahdView(d_soahdLayout); SoAFullDeviceView d_soaFullView(d_soahdLayout, d_soadoLayout); // Assert column alignments assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_x()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_y()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_z()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_a()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_b()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_r()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_description()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(h_soahd.metadata().addressOf_someNumber()) % decltype(h_soahd)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_x()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_y()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_z()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_a()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_b()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_r()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_description()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soahdLayout.metadata().addressOf_someNumber()) % decltype(d_soahdLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_color()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_value()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_py()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_count()) % decltype(d_soadoLayout)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soadoLayout.metadata().addressOf_anotherCount()) % decltype(d_soadoLayout)::alignment); // Views should get the same alignment as the stores they refer to assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_x()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_y()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_z()) % decltype(d_soaFullView)::alignment); // Limitation of views: we have to get scalar member addresses via metadata. 
assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_description()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_someNumber()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_color()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_value()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_py()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_count()) % decltype(d_soaFullView)::alignment); assert(0 == reinterpret_cast<uintptr_t>(d_soaFullView.metadata().addressOf_anotherCount()) % decltype(d_soaFullView)::alignment); // Initialize and fill the host buffer std::memset(h_soahdLayout.metadata().data(), 0, hostDeviceSize); for (size_t i = 0; i < numElements; ++i) { auto si = h_soahd[i]; // Tuple assignment... // elements are: x, y, z, a, b, r auto v1 = 1.0 * i + 1.0; auto v2 = 2.0 * i; auto v3 = 3.0 * i - 1.0; if (i % 2) { si = {v1, v2, v3, {v1, v2, v3}, {v3, v2, v1}, {0, 0, 0}}; } else { si.x() = si.a()(0) = si.b()(2) = v1; si.y() = si.a()(1) = si.b()(1) = v2; si.z() = si.a()(2) = si.b()(0) = v3; } } auto& sn = h_soahd.someNumber(); sn = numElements + 2; // Push to device cudaCheck(cudaMemcpyAsync(d_buf, h_buf, hostDeviceSize, cudaMemcpyDefault, stream)); // Process on device crossProduct<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soahdView, numElements); // Paint the device only with 0xFF initially cudaCheck(cudaMemsetAsync(d_soadoLayout.metadata().data(), 0xFF, d_soadoLayout.metadata().byteSize(), stream)); // Produce to the device only area producerKernel<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soaFullView, numElements); // Consume the device only area and generate a result on the host-device area consumerKernel<<<(numElements + 255) / 256, 256, 0, stream>>>(d_soaFullView, numElements); // Get result back cudaCheck(cudaMemcpyAsync(h_buf, d_buf, hostDeviceSize, cudaMemcpyDefault, stream)); // Wait and validate. 
cudaCheck(cudaStreamSynchronize(stream)); for (size_t i = 0; i < numElements; ++i) { auto si = h_soahd_c[i]; assert(si.r() == si.a().cross(si.b())); double initialX = 1.0 * i + 1.0; double initialY = 2.0 * i; double initialZ = 3.0 * i - 1.0; uint16_t expectedColor = 0x55 << i % (sizeof(uint16_t) - sizeof(char)); double expectedX = expectedColor * sqrt(initialX * initialX + initialY * initialY + initialZ * initialZ); if (abs(si.x() - expectedX) / expectedX >= 2 * std::numeric_limits<double>::epsilon()) { std::cout << "X failed: for i=" << i << std::endl << "initialX=" << initialX << " initialY=" << initialY << " initialZ=" << initialZ << std::endl << "expectedX=" << expectedX << std::endl << "resultX=" << si.x() << " resultY=" << si.y() << " resultZ=" << si.z() << std::endl << "relativeDiff=" << abs(si.x() - expectedX) / expectedX << " epsilon=" << std::numeric_limits<double>::epsilon() << std::endl; assert(false); } } // Validation of range checking try { // Get a view like the default, except for range checking SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled> soa1viewRangeChecking(h_soahdLayout); // This should throw an exception [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl; assert(false); } catch (const std::out_of_range&) { std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl; } try { // Get a view like the default, except for range checking SoAHostDeviceLayout::ViewTemplate<SoAHostDeviceView::restrictQualify, cms::soa::RangeChecking::enabled> soa1viewRangeChecking(h_soahdLayout); // This should throw an exception [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl; assert(false); } catch (const std::out_of_range&) { std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host." << std::endl; } // Validation of range checking in a kernel // Get a view like the default one, except for range checking RangeCheckingHostDeviceView soa1viewRangeChecking(d_soahdLayout); // This should throw an exception in the kernel rangeCheckKernel<<<1, 1, 0, stream>>>(soa1viewRangeChecking); // Wait and confirm that the CUDA kernel failed try { cudaCheck(cudaStreamSynchronize(stream)); std::cout << "Fail: expected range-check exception not caught while executing the kernel." << std::endl; assert(false); } catch (const std::runtime_error&) { std::cout << "Pass: expected range-check exception caught while executing the kernel." << std::endl; } std::cout << "OK" << std::endl; }
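Editor's note: the range-checking behaviour exercised above is selected by swapping the view's RangeChecking template parameter and catching std::out_of_range. A minimal stand-alone sketch of that pattern follows; CheckedView is a hypothetical class used only for illustration and is not the cms::soa API.

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Hypothetical illustration of an opt-in range-checked view; the real
// cms::soa views select this behaviour through a template parameter.
template <bool RangeChecking>
class CheckedView {
public:
  explicit CheckedView(std::vector<double>& data) : data_(data) {}
  double& operator[](std::size_t i) {
    if constexpr (RangeChecking) {
      if (i >= data_.size())
        throw std::out_of_range("CheckedView: index out of range");
    }
    return data_[i];
  }
private:
  std::vector<double>& data_;
};

int main() {
  std::vector<double> v(16, 0.0);
  CheckedView<true> checked(v);
  try {
    checked[v.size()];  // one past the end: must throw when checking is enabled
    std::cout << "Fail: expected exception not thrown" << std::endl;
  } catch (const std::out_of_range&) {
    std::cout << "Pass: out_of_range caught" << std::endl;
  }
  return 0;
}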
038d53cbbd5424965dc761a48d0a015affaa0a5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/common.h" #include <stdio.h> #include <stdlib.h> /** * This example illustrates implementation of custom atomic operations using * CUDA's built-in atomicCAS function to implement atomic signed 32-bit integer * addition. **/ __device__ int myAtomicAdd(int *address, int incr) { // Create an initial guess for the value stored at *address. int guess = *address; int oldValue = atomicCAS(address, guess, guess + incr); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; oldValue = atomicCAS(address, guess, guess + incr); } return oldValue; } __global__ void kernel(int *sharedInteger) { myAtomicAdd(sharedInteger, 1); }
038d53cbbd5424965dc761a48d0a015affaa0a5c.cu
#include "../common/common.h" #include <stdio.h> #include <stdlib.h> /** * This example illustrates implementation of custom atomic operations using * CUDA's built-in atomicCAS function to implement atomic signed 32-bit integer * addition. **/ __device__ int myAtomicAdd(int *address, int incr) { // Create an initial guess for the value stored at *address. int guess = *address; int oldValue = atomicCAS(address, guess, guess + incr); // Loop while the guess is incorrect. while (oldValue != guess) { guess = oldValue; oldValue = atomicCAS(address, guess, guess + incr); } return oldValue; } __global__ void kernel(int *sharedInteger) { myAtomicAdd(sharedInteger, 1); }
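Editor's note: the file above defines the kernel but no host driver. A minimal sketch of one, assuming kernel() and myAtomicAdd() from the file above are compiled in the same translation unit; the block/thread counts are arbitrary illustrative values.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const int blocks = 64, threads = 128;
  int *d_counter = nullptr;
  cudaMalloc(&d_counter, sizeof(int));
  cudaMemset(d_counter, 0, sizeof(int));

  // Every thread performs one CAS-based increment of the shared counter.
  kernel<<<blocks, threads>>>(d_counter);
  cudaDeviceSynchronize();

  int h_counter = 0;
  cudaMemcpy(&h_counter, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
  // Each thread added 1, so the counter should equal blocks * threads.
  printf("counter = %d (expected %d)\n", h_counter, blocks * threads);

  cudaFree(d_counter);
  return 0;
}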
5dee5f1932eb512974a651b9227a2cc9037c0995.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "coords.cuh" #include <stdio.h> __device__ void calculateTransformMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { float3 u = to.x; float3 v = to.y; float3 w = to.z; // printf("U: {%f %f %f} V: {%f %f %f} W: {%f %f %f}\n", // u.x, u.y, u.z, // v.x, v.y, v.z, // w.x, w.y, w.z // ); float3 ts[4] = {from.x, from.y, from.z, {0,0,0}}; for (int i = 0; i < 4; i++) { float3 t = ts[i]; float d0 = dot(u, cross(v,w)); float d1 = dot(t, cross(v,w)); float d2 = dot(u, cross(t,w)); float d3 = dot(u, cross(v,t)); float e1 = d1/d0; float e2 = d2/d0; float e3 = d3/d0; matrix[i*4+0] = e1; matrix[i*4+1] = e2; matrix[i*4+2] = e3; if(i == 3) { matrix[i*4+3] = 1; } else { matrix[i*4+3] = 0; } } // printf("[[%f %f %f %f]\n[%f %f %f %f]\n[%f %f %f %f]\n[%f %f %f %f]]\n", // matrix[0], // matrix[1], // matrix[2], // matrix[3], // matrix[4], // matrix[5], // matrix[6], // matrix[7], // matrix[8], // matrix[9], // matrix[10], // matrix[11], // matrix[12], // matrix[13], // matrix[14], // matrix[15] // ); } __device__ float3 multiply4x4x3(float *matrix, float3 a) { float u = a.x * matrix[0] + a.y * matrix[4] + a.z * matrix[8] + matrix[12]; float v = a.x * matrix[1] + a.y * matrix[5] + a.z * matrix[9] + matrix[13]; float w = a.x * matrix[2] + a.y * matrix[6] + a.z * matrix[10] + matrix[14]; float3 result = {u,v,w}; return result; } __global__ void transformPoints(float3 *values, uint32_t numValues, float *matrix, float3 *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numValues) { result[i] = multiply4x4x3(matrix, values[i]); } } __global__ void transformPoints(float3idx *values, uint32_t numValues, float *matrix, float3idx *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numValues) { float3 value = values[i].value; uint32_t idx = values[i].i; value = multiply4x4x3(matrix, value); float3idx res = {value, idx}; result[i] = res; } } __global__ void createMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < 1) { calculateTransformMatrix(from, to, matrix); } } void coordSpaceMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { hipLaunchKernelGGL(( createMatrix), dim3(1),dim3(1), 0, 0, from, to, matrix); } void moveToCoordSpace(CoordinateSystem from, CoordinateSystem to, float3 *values, uint32_t numValues, float3 *result) { float *matrix = NULL; size_t matrixSize = 4*4*sizeof(float); hipMalloc((void**) &matrix, matrixSize); coordSpaceMatrix(from, to, matrix); int threadsPerBlock = 256; int blocksPerGrid = (numValues + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( transformPoints), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, values, numValues, matrix, result); } void moveToCoordSpace(CoordinateSystem from, CoordinateSystem to, float3idx *values, uint32_t numValues, float3idx *result) { float *matrix = NULL; size_t matrixSize = 4*4*sizeof(float); hipMalloc((void**) &matrix, matrixSize); coordSpaceMatrix(from, to, matrix); int threadsPerBlock = 256; int blocksPerGrid = (numValues + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( transformPoints), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, values, numValues, matrix, result); }
5dee5f1932eb512974a651b9227a2cc9037c0995.cu
#include "coords.cuh" #include <stdio.h> __device__ void calculateTransformMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { float3 u = to.x; float3 v = to.y; float3 w = to.z; // printf("U: {%f %f %f} V: {%f %f %f} W: {%f %f %f}\n", // u.x, u.y, u.z, // v.x, v.y, v.z, // w.x, w.y, w.z // ); float3 ts[4] = {from.x, from.y, from.z, {0,0,0}}; for (int i = 0; i < 4; i++) { float3 t = ts[i]; float d0 = dot(u, cross(v,w)); float d1 = dot(t, cross(v,w)); float d2 = dot(u, cross(t,w)); float d3 = dot(u, cross(v,t)); float e1 = d1/d0; float e2 = d2/d0; float e3 = d3/d0; matrix[i*4+0] = e1; matrix[i*4+1] = e2; matrix[i*4+2] = e3; if(i == 3) { matrix[i*4+3] = 1; } else { matrix[i*4+3] = 0; } } // printf("[[%f %f %f %f]\n[%f %f %f %f]\n[%f %f %f %f]\n[%f %f %f %f]]\n", // matrix[0], // matrix[1], // matrix[2], // matrix[3], // matrix[4], // matrix[5], // matrix[6], // matrix[7], // matrix[8], // matrix[9], // matrix[10], // matrix[11], // matrix[12], // matrix[13], // matrix[14], // matrix[15] // ); } __device__ float3 multiply4x4x3(float *matrix, float3 a) { float u = a.x * matrix[0] + a.y * matrix[4] + a.z * matrix[8] + matrix[12]; float v = a.x * matrix[1] + a.y * matrix[5] + a.z * matrix[9] + matrix[13]; float w = a.x * matrix[2] + a.y * matrix[6] + a.z * matrix[10] + matrix[14]; float3 result = {u,v,w}; return result; } __global__ void transformPoints(float3 *values, uint32_t numValues, float *matrix, float3 *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numValues) { result[i] = multiply4x4x3(matrix, values[i]); } } __global__ void transformPoints(float3idx *values, uint32_t numValues, float *matrix, float3idx *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numValues) { float3 value = values[i].value; uint32_t idx = values[i].i; value = multiply4x4x3(matrix, value); float3idx res = {value, idx}; result[i] = res; } } __global__ void createMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < 1) { calculateTransformMatrix(from, to, matrix); } } void coordSpaceMatrix(CoordinateSystem from, CoordinateSystem to, float *matrix) { createMatrix<<<1,1>>>(from, to, matrix); } void moveToCoordSpace(CoordinateSystem from, CoordinateSystem to, float3 *values, uint32_t numValues, float3 *result) { float *matrix = NULL; size_t matrixSize = 4*4*sizeof(float); cudaMalloc((void**) &matrix, matrixSize); coordSpaceMatrix(from, to, matrix); int threadsPerBlock = 256; int blocksPerGrid = (numValues + threadsPerBlock - 1) / threadsPerBlock; transformPoints<<<blocksPerGrid, threadsPerBlock>>>(values, numValues, matrix, result); } void moveToCoordSpace(CoordinateSystem from, CoordinateSystem to, float3idx *values, uint32_t numValues, float3idx *result) { float *matrix = NULL; size_t matrixSize = 4*4*sizeof(float); cudaMalloc((void**) &matrix, matrixSize); coordSpaceMatrix(from, to, matrix); int threadsPerBlock = 256; int blocksPerGrid = (numValues + threadsPerBlock - 1) / threadsPerBlock; transformPoints<<<blocksPerGrid, threadsPerBlock>>>(values, numValues, matrix, result); }
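Editor's note: no caller of moveToCoordSpace appears in this excerpt. The sketch below is a hedged host-side usage example; the CoordinateSystem layout (three float3 basis vectors) is inferred from the kernel code above and may differ from the actual definition in coords.cuh. Note also that, as written above, the temporary matrix buffer allocated inside moveToCoordSpace does not appear to be freed, and the helper does not synchronize.

#include <cstdint>
#include <cuda_runtime.h>
#include "coords.cuh"  // assumed to declare CoordinateSystem and moveToCoordSpace

int main() {
  // Purely illustrative frames: identity "from" basis, "to" basis with x/y swapped.
  CoordinateSystem from = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
  CoordinateSystem to   = {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}};

  const uint32_t n = 1024;
  float3 *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in,  n * sizeof(float3));
  cudaMalloc(&d_out, n * sizeof(float3));
  // ... fill d_in from the host with cudaMemcpy ...

  moveToCoordSpace(from, to, d_in, n, d_out);
  cudaDeviceSynchronize();  // the helper launches kernels but does not wait for them

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}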
241dbac26ed3df962f18641eb8aadf336a24eaec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "benchmark_tc_half_rrr.hpp" #include "benchmark_cpu_float_rrr.hpp" #include "numeric.hpp" #include "time.hpp" #include "cuda_runtime.hpp" #include <hiprand/hiprand.h> #include <mma.h> #include <iostream> constexpr int WMMA_TILE_M = 16; constexpr int WMMA_TILE_N = 16; constexpr int WMMA_TILE_K = 16; static __global__ void mm_tc(float *_c, const half *_a, const half *_b, const int M, const int N, const int K) { #define a(_i, _j) (_a[(_i)*K + (_j)]) #define b(_i, _j) (_b[(_i)*N + (_j)]) #define c(_i, _j) (_c[(_i)*N + (_j)]) using nvcuda::wmma::accumulator; using nvcuda::wmma::fill_fragment; using nvcuda::wmma::fragment; using nvcuda::wmma::load_matrix_sync; using nvcuda::wmma::matrix_a; using nvcuda::wmma::matrix_b; using nvcuda::wmma::mem_row_major; // a layout_t using nvcuda::wmma::mma_sync; using nvcuda::wmma::row_major; // a type using nvcuda::wmma::store_matrix_sync; typedef fragment<matrix_a, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, half, row_major> FragA; typedef fragment<matrix_b, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, half, row_major> FragB; typedef fragment<accumulator, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, float> FragC; FragA fa; FragB fb; FragC fc; fill_fragment(fc, 0.0f); // TODO -- loop over matrices with warps const int wx = (blockIdx.x * blockDim.x + threadIdx.x) / 32; const int wy = blockIdx.y * blockDim.y + threadIdx.y; for (int t = 0; t < K; t += WMMA_TILE_K) { // row and col of beginning of tile const int aRow = wy * WMMA_TILE_M; const int aCol = t; const int bRow = t; const int bCol = wx * WMMA_TILE_N; if (aRow < M && aCol < K && bRow < K && bCol < N) { // cast to half for now load_matrix_sync(fa, &a(aRow, aCol), unsigned(K)); load_matrix_sync(fb, &b(bRow, bCol), unsigned(N)); mma_sync(fc, fa, fb, fc); } } const int cRow = wy * WMMA_TILE_M; const int cCol = wx * WMMA_TILE_N; if (cRow < M && cCol < N) { store_matrix_sync(&c(cRow, cCol), fc, N, mem_row_major); } #undef a #undef b #undef c } static __global__ void float_to_half(half *h, const float *f, size_t n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { h[i] = f[i]; } } TCHalfRRR::TCHalfRRR() { CUDA_RUNTIME(hipStreamCreate(&stream_)); CUDA_RUNTIME(hipEventCreate(&start_)); CUDA_RUNTIME(hipEventCreate(&stop_)); } TCHalfRRR::~TCHalfRRR() { CUDA_RUNTIME(hipStreamDestroy(stream_)); CUDA_RUNTIME(hipEventDestroy(start_)); CUDA_RUNTIME(hipEventDestroy(stop_)); } bool TCHalfRRR::check() { bool success = true; // compute expected std::vector<float> _ce(m_ * n_); CPURRR::mm(_ce.data(), a32_, b32_, m_, n_, k_); #define ca(i, j) (ca_[(i)*n_ + (j)]) #define ce(i, j) (_ce[(i)*n_ + (j)]) for (int i = 0; i < m_; ++i) { for (int j = 0; j < n_; ++j) { if (!almost_equal(ca(i, j), ce(i, j), 1e-2)) { std::cerr << "ERR at " << i << " " << j << " " << "ce=" << ce(i, j) << " ca=" << ca(i, j) << std::endl; success = false; } } } #undef ca #undef ce // send ca back to GPU CUDA_RUNTIME(hipMemPrefetchAsync(ca_, sizeof(*ca_) *m_ *n_, 0, 0)); CUDA_RUNTIME(hipDeviceSynchronize()); return success; } void TCHalfRRR::initialize(const Spec &spec) { // pad out to next multiple of 16 in each dimension m_ = (spec.m + 15) / 16 * 16; n_ = (spec.n + 15) / 16 * 16; k_ = (spec.k + 15) / 16 * 16; // generate random numbers on CPU CUDA_RUNTIME(hipMallocManaged(&a32_, sizeof(*a32_) * m_ * k_)); CUDA_RUNTIME(hipMallocManaged(&b32_, sizeof(*b32_) * k_ * n_)); fill(a32_, m_ * k_); fill(b32_, k_ * n_); // convert to half-precision 
GPU inputs CUDA_RUNTIME(hipMalloc(&a_, sizeof(*a_) * m_ * k_)); CUDA_RUNTIME(hipMalloc(&b_, sizeof(*b_) * k_ * n_)); hipLaunchKernelGGL(( float_to_half), dim3(256),dim3(256), 0, 0, a_, a32_, m_ * k_); hipLaunchKernelGGL(( float_to_half), dim3(256),dim3(256), 0, 0, b_, b32_, k_ * n_); CUDA_RUNTIME(hipDeviceSynchronize()); // GPU output CUDA_RUNTIME(hipMallocManaged(&ca_, sizeof(*ca_) * m_ * n_)); } void TCHalfRRR::finalize() { CUDA_RUNTIME(hipFree(a_)); CUDA_RUNTIME(hipFree(b_)); CUDA_RUNTIME(hipFree(ca_)); CUDA_RUNTIME(hipFree(a32_)); CUDA_RUNTIME(hipFree(b32_)); } double TCHalfRRR::sample() { // 1 warp in x, 8 warps in y constexpr dim3 bd(32, 8, 1); const dim3 gd((n_ + WMMA_TILE_N - 1) / WMMA_TILE_N, (m_ + WMMA_TILE_N * bd.y - 1) / (WMMA_TILE_N * bd.y), 1); hipEventRecord(start_, stream_); hipLaunchKernelGGL(( mm_tc), dim3(gd), dim3(bd), 0, stream_, ca_, a_, b_, m_, n_, k_); CUDA_RUNTIME(hipEventRecord(stop_, stream_)); CUDA_RUNTIME(hipGetLastError()); CUDA_RUNTIME(hipEventSynchronize(stop_)); float millis; CUDA_RUNTIME(hipEventElapsedTime(&millis, start_, stop_)); return millis/1e3; }
241dbac26ed3df962f18641eb8aadf336a24eaec.cu
#include "benchmark_tc_half_rrr.hpp" #include "benchmark_cpu_float_rrr.hpp" #include "numeric.hpp" #include "time.hpp" #include "cuda_runtime.hpp" #include <curand.h> #include <mma.h> #include <iostream> constexpr int WMMA_TILE_M = 16; constexpr int WMMA_TILE_N = 16; constexpr int WMMA_TILE_K = 16; static __global__ void mm_tc(float *_c, const half *_a, const half *_b, const int M, const int N, const int K) { #define a(_i, _j) (_a[(_i)*K + (_j)]) #define b(_i, _j) (_b[(_i)*N + (_j)]) #define c(_i, _j) (_c[(_i)*N + (_j)]) using nvcuda::wmma::accumulator; using nvcuda::wmma::fill_fragment; using nvcuda::wmma::fragment; using nvcuda::wmma::load_matrix_sync; using nvcuda::wmma::matrix_a; using nvcuda::wmma::matrix_b; using nvcuda::wmma::mem_row_major; // a layout_t using nvcuda::wmma::mma_sync; using nvcuda::wmma::row_major; // a type using nvcuda::wmma::store_matrix_sync; typedef fragment<matrix_a, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, half, row_major> FragA; typedef fragment<matrix_b, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, half, row_major> FragB; typedef fragment<accumulator, WMMA_TILE_M, WMMA_TILE_N, WMMA_TILE_K, float> FragC; FragA fa; FragB fb; FragC fc; fill_fragment(fc, 0.0f); // TODO -- loop over matrices with warps const int wx = (blockIdx.x * blockDim.x + threadIdx.x) / 32; const int wy = blockIdx.y * blockDim.y + threadIdx.y; for (int t = 0; t < K; t += WMMA_TILE_K) { // row and col of beginning of tile const int aRow = wy * WMMA_TILE_M; const int aCol = t; const int bRow = t; const int bCol = wx * WMMA_TILE_N; if (aRow < M && aCol < K && bRow < K && bCol < N) { // cast to half for now load_matrix_sync(fa, &a(aRow, aCol), unsigned(K)); load_matrix_sync(fb, &b(bRow, bCol), unsigned(N)); mma_sync(fc, fa, fb, fc); } } const int cRow = wy * WMMA_TILE_M; const int cCol = wx * WMMA_TILE_N; if (cRow < M && cCol < N) { store_matrix_sync(&c(cRow, cCol), fc, N, mem_row_major); } #undef a #undef b #undef c } static __global__ void float_to_half(half *h, const float *f, size_t n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { h[i] = f[i]; } } TCHalfRRR::TCHalfRRR() { CUDA_RUNTIME(cudaStreamCreate(&stream_)); CUDA_RUNTIME(cudaEventCreate(&start_)); CUDA_RUNTIME(cudaEventCreate(&stop_)); } TCHalfRRR::~TCHalfRRR() { CUDA_RUNTIME(cudaStreamDestroy(stream_)); CUDA_RUNTIME(cudaEventDestroy(start_)); CUDA_RUNTIME(cudaEventDestroy(stop_)); } bool TCHalfRRR::check() { bool success = true; // compute expected std::vector<float> _ce(m_ * n_); CPURRR::mm(_ce.data(), a32_, b32_, m_, n_, k_); #define ca(i, j) (ca_[(i)*n_ + (j)]) #define ce(i, j) (_ce[(i)*n_ + (j)]) for (int i = 0; i < m_; ++i) { for (int j = 0; j < n_; ++j) { if (!almost_equal(ca(i, j), ce(i, j), 1e-2)) { std::cerr << "ERR at " << i << " " << j << " " << "ce=" << ce(i, j) << " ca=" << ca(i, j) << std::endl; success = false; } } } #undef ca #undef ce // send ca back to GPU CUDA_RUNTIME(cudaMemPrefetchAsync(ca_, sizeof(*ca_) *m_ *n_, 0, 0)); CUDA_RUNTIME(cudaDeviceSynchronize()); return success; } void TCHalfRRR::initialize(const Spec &spec) { // pad out to next multiple of 16 in each dimension m_ = (spec.m + 15) / 16 * 16; n_ = (spec.n + 15) / 16 * 16; k_ = (spec.k + 15) / 16 * 16; // generate random numbers on CPU CUDA_RUNTIME(cudaMallocManaged(&a32_, sizeof(*a32_) * m_ * k_)); CUDA_RUNTIME(cudaMallocManaged(&b32_, sizeof(*b32_) * k_ * n_)); fill(a32_, m_ * k_); fill(b32_, k_ * n_); // convert to half-precision GPU inputs CUDA_RUNTIME(cudaMalloc(&a_, sizeof(*a_) * m_ * k_)); 
CUDA_RUNTIME(cudaMalloc(&b_, sizeof(*b_) * k_ * n_)); float_to_half<<<256,256>>>(a_, a32_, m_ * k_); float_to_half<<<256,256>>>(b_, b32_, k_ * n_); CUDA_RUNTIME(cudaDeviceSynchronize()); // GPU output CUDA_RUNTIME(cudaMallocManaged(&ca_, sizeof(*ca_) * m_ * n_)); } void TCHalfRRR::finalize() { CUDA_RUNTIME(cudaFree(a_)); CUDA_RUNTIME(cudaFree(b_)); CUDA_RUNTIME(cudaFree(ca_)); CUDA_RUNTIME(cudaFree(a32_)); CUDA_RUNTIME(cudaFree(b32_)); } double TCHalfRRR::sample() { // 1 warp in x, 8 warps in y constexpr dim3 bd(32, 8, 1); const dim3 gd((n_ + WMMA_TILE_N - 1) / WMMA_TILE_N, (m_ + WMMA_TILE_N * bd.y - 1) / (WMMA_TILE_N * bd.y), 1); cudaEventRecord(start_, stream_); mm_tc<<<gd, bd, 0, stream_>>>(ca_, a_, b_, m_, n_, k_); CUDA_RUNTIME(cudaEventRecord(stop_, stream_)); CUDA_RUNTIME(cudaGetLastError()); CUDA_RUNTIME(cudaEventSynchronize(stop_)); float millis; CUDA_RUNTIME(cudaEventElapsedTime(&millis, start_, stop_)); return millis/1e3; }
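Editor's note: the launch geometry in sample() packs one warp along x and eight warps along y per block, so each warp owns one 16x16 WMMA output tile and each block covers eight tile rows. A short host-only sketch of that arithmetic, with illustrative matrix sizes (the original uses WMMA_TILE_N in the y-dimension rounding, which is equivalent here since all tile extents are 16):

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const int M = 1024, N = 1024;              // padded sizes (multiples of 16)
  const int WMMA_TILE_M = 16, WMMA_TILE_N = 16;

  // One warp (32 threads) along x computes one 16x16 tile; blockDim.y = 8
  // stacks eight such warps, i.e. eight tile rows per block.
  const dim3 bd(32, 8, 1);
  const dim3 gd((N + WMMA_TILE_N - 1) / WMMA_TILE_N,
                (M + WMMA_TILE_M * bd.y - 1) / (WMMA_TILE_M * bd.y),
                1);

  // In the kernel, wx = (blockIdx.x*blockDim.x + threadIdx.x)/32 selects the
  // tile column and wy = blockIdx.y*blockDim.y + threadIdx.y the tile row.
  printf("grid = (%u, %u), output tiles = (%d x %d)\n",
         gd.x, gd.y, M / WMMA_TILE_M, N / WMMA_TILE_N);
  return 0;
}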
b4a98d735d8f54f847c9059454f7da083186efb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Scale the particle positions with each axis independent */ extern "C" __global__ void scalePositions(float scaleX, float scaleY, float scaleZ, int numMolecules, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real4* __restrict__ posq, const int* __restrict__ moleculeAtoms, const int* __restrict__ moleculeStartIndex) { for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numMolecules; index += blockDim.x*gridDim.x) { int first = moleculeStartIndex[index]; int last = moleculeStartIndex[index+1]; int numAtoms = last-first; // Find the center of each molecule. real3 center = make_real3(0, 0, 0); for (int atom = first; atom < last; atom++) { real4 pos = posq[moleculeAtoms[atom]]; center.x += pos.x; center.y += pos.y; center.z += pos.z; } real invNumAtoms = RECIP(numAtoms); center.x *= invNumAtoms; center.y *= invNumAtoms; center.z *= invNumAtoms; // Move it into the first periodic box. real3 oldCenter = center; APPLY_PERIODIC_TO_POS(center) real3 delta = make_real3(oldCenter.x-center.x, oldCenter.y-center.y, oldCenter.z-center.z); // Now scale the position of the molecule center. delta.x = center.x*(scaleX-1)-delta.x; delta.y = center.y*(scaleY-1)-delta.y; delta.z = center.z*(scaleZ-1)-delta.z; for (int atom = first; atom < last; atom++) { real4 pos = posq[moleculeAtoms[atom]]; pos.x += delta.x; pos.y += delta.y; pos.z += delta.z; posq[moleculeAtoms[atom]] = pos; } } }
b4a98d735d8f54f847c9059454f7da083186efb9.cu
/** * Scale the particle positions with each axis independent */ extern "C" __global__ void scalePositions(float scaleX, float scaleY, float scaleZ, int numMolecules, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, real4* __restrict__ posq, const int* __restrict__ moleculeAtoms, const int* __restrict__ moleculeStartIndex) { for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numMolecules; index += blockDim.x*gridDim.x) { int first = moleculeStartIndex[index]; int last = moleculeStartIndex[index+1]; int numAtoms = last-first; // Find the center of each molecule. real3 center = make_real3(0, 0, 0); for (int atom = first; atom < last; atom++) { real4 pos = posq[moleculeAtoms[atom]]; center.x += pos.x; center.y += pos.y; center.z += pos.z; } real invNumAtoms = RECIP(numAtoms); center.x *= invNumAtoms; center.y *= invNumAtoms; center.z *= invNumAtoms; // Move it into the first periodic box. real3 oldCenter = center; APPLY_PERIODIC_TO_POS(center) real3 delta = make_real3(oldCenter.x-center.x, oldCenter.y-center.y, oldCenter.z-center.z); // Now scale the position of the molecule center. delta.x = center.x*(scaleX-1)-delta.x; delta.y = center.y*(scaleY-1)-delta.y; delta.z = center.z*(scaleZ-1)-delta.z; for (int atom = first; atom < last; atom++) { real4 pos = posq[moleculeAtoms[atom]]; pos.x += delta.x; pos.y += delta.y; pos.z += delta.z; posq[moleculeAtoms[atom]] = pos; } } }
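Editor's note: the kernel above relies on RECIP and APPLY_PERIODIC_TO_POS macros supplied by the surrounding build, so it is not runnable in isolation. As a hedged illustration only, the per-molecule scaling logic can be written on the CPU for a plain orthorhombic box as below; the floor-based wrapping stands in for APPLY_PERIODIC_TO_POS and is an assumption, not the library's actual triclinic handling.

#include <cmath>
#include <vector>

struct Vec3 { double x, y, z; };

// Mirrors the GPU kernel: compute the molecule center, wrap it into the first
// box image, then shift every atom by the displacement of the scaled center.
void scaleMolecule(std::vector<Vec3>& pos, double boxX, double boxY, double boxZ,
                   double scaleX, double scaleY, double scaleZ) {
  Vec3 center{0, 0, 0};
  for (const Vec3& p : pos) {
    center.x += p.x; center.y += p.y; center.z += p.z;
  }
  const double inv = 1.0 / pos.size();
  center.x *= inv; center.y *= inv; center.z *= inv;

  // Wrap the center into the first periodic image (orthorhombic assumption).
  Vec3 wrapped = {center.x - boxX * std::floor(center.x / boxX),
                  center.y - boxY * std::floor(center.y / boxY),
                  center.z - boxZ * std::floor(center.z / boxZ)};
  Vec3 delta = {center.x - wrapped.x, center.y - wrapped.y, center.z - wrapped.z};

  // Scale the wrapped center position and undo the wrapping offset.
  delta.x = wrapped.x * (scaleX - 1) - delta.x;
  delta.y = wrapped.y * (scaleY - 1) - delta.y;
  delta.z = wrapped.z * (scaleZ - 1) - delta.z;

  for (Vec3& p : pos) {
    p.x += delta.x; p.y += delta.y; p.z += delta.z;
  }
}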
8aa4437efd39e9ea2965d06ac59b4d09ee3ea642.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/blob.hpp" namespace caffe { //sigmoid template <typename Dtype> __global__ void caffe_SigmoidForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } template <typename Dtype> __global__ void caffe_SigmoidBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } template <> void caffe_gpu_sigmoid_forward<float>(int N, const float* bottom, float* top) { hipLaunchKernelGGL(( caffe_SigmoidForward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_forward<double>(int N, const double* bottom, double* top) { hipLaunchKernelGGL(( caffe_SigmoidForward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_backward<float>(int N,const float* top_data, const float* top_diff, float* bottom_diff) { hipLaunchKernelGGL(( caffe_SigmoidBackward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_backward<double>(int N,const double* top_data, const double* top_diff, double* bottom_diff) { hipLaunchKernelGGL(( caffe_SigmoidBackward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } //relu template <typename Dtype> __global__ void caffe_ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? 
in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void caffe_ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <> void caffe_gpu_relu_forward<float>(int N, const float* bottom, float* top) { hipLaunchKernelGGL(( caffe_ReLUForward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_forward<double>(int N, const double* bottom, double* top) { hipLaunchKernelGGL(( caffe_ReLUForward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_backward<float>(int N, const float* top_data,const float* top_diff, float* bottom_diff) { hipLaunchKernelGGL(( caffe_ReLUBackward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_backward<double>(int N, const double* top_data,const double* top_diff, double* bottom_diff) { hipLaunchKernelGGL(( caffe_ReLUBackward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff, 0); CUDA_POST_KERNEL_CHECK; } //tanh template <typename Dtype> __global__ void caffe_TanHForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = tanh(in[index]); } } template <typename Dtype> __global__ void caffe_TanHBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } template <> void caffe_gpu_tanh_forward<float>(int N, const float* bottom, float* top) { hipLaunchKernelGGL(( caffe_TanHForward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_forward<double>(int N, const double* bottom, double* top) { hipLaunchKernelGGL(( caffe_TanHForward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_backward<float>(int N, const float* top_data,const float* top_diff, float* bottom_diff) { hipLaunchKernelGGL(( caffe_TanHBackward<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_backward<double>(int N, const double* top_data,const double* top_diff, double* bottom_diff) { hipLaunchKernelGGL(( caffe_TanHBackward<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 
1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void 
caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t 
caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } template <typename Dtype> __global__ void permute_gpu_matrix(const int n, const int dimsize, const Dtype* datain, Dtype* dataout, const int *order, const int * dstdim_capacity, const int flipdim, const int * dstdimensions, const int * dim_capacity) { int curidx=0; int idx[4],dstidx[4]; CUDA_KERNEL_LOOP(i, n) //for(int i=0;i<count;i++) { int temp=i; for(int j=0;j<dimsize;j++) { dstidx[j]= temp / dstdim_capacity[j]; temp = temp % dstdim_capacity[j]; } if(flipdim >= 0) { dstidx[flipdim] = dstdimensions[flipdim] - 1 - dstidx[flipdim] ; } for(int j=0;j<dimsize;j++) { idx[order[j]] = dstidx[j]; } curidx=0; for(int j=0;j<dimsize;j++) { curidx += idx[j]* dim_capacity[j]; } dataout[i] = datain[curidx]; } } template <typename Dtype> void caffe_gpu_permute(const Dtype * datain, Dtype * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim) { /* * implement matlab permute, with additional operation. *1. permutation matrix dimensions *2. 
flip given dimension *liangji, 20150113 */ CHECK(dimsize<=4)<<"currently caffe_gpu_permute only support matrix dimensions smaller than 4, got "<<dimsize<<" here."; shared_ptr<Blob <int> > dim_capacity_data(new Blob<int>()); shared_ptr<Blob <int> > dstdimensions_data(new Blob<int>()); shared_ptr<Blob <int> > dstdim_capacity_data(new Blob<int>()); shared_ptr<Blob <int> > order_dim_data(new Blob<int>()); dim_capacity_data->Reshape(1,1,1,dimsize); dstdimensions_data->Reshape(1,1,1,dimsize); dstdim_capacity_data->Reshape(1,1,1,dimsize); order_dim_data->Reshape(1,1,1,dimsize); caffe_set(dimsize, 1, dim_capacity_data->mutable_cpu_data()); caffe_set(dimsize, 0, dstdimensions_data->mutable_cpu_data()); caffe_set(dimsize, 1, dstdim_capacity_data->mutable_cpu_data()); caffe_set(dimsize, 0, order_dim_data->mutable_cpu_data()); caffe_copy(dimsize, order, order_dim_data->mutable_cpu_data()); int *dim_capacity, *dstdimensions,*dstdim_capacity;//,*order_dim; dim_capacity = dim_capacity_data->mutable_cpu_data(); dstdimensions = dstdimensions_data->mutable_cpu_data(); dstdim_capacity = dstdim_capacity_data->mutable_cpu_data(); //order_dim = order_dim_data->mutable_cpu_data(); for(int i = dimsize-2; i>=0; i-- ) { dim_capacity[i]=1; for(int j=i+1;j<dimsize;j++) dim_capacity[i] *= dimensions[j]; } for(int i =0; i<dimsize; i++ ) { dstdimensions[i] = dimensions[order[i]]; } for(int i = dimsize-2; i>=0; i-- ) { dstdim_capacity[i]=1; for(int j=i+1;j<dimsize;j++) dstdim_capacity[i] *= dstdimensions[j]; } int count = dim_capacity[0]* dimensions[0]; hipLaunchKernelGGL(( permute_gpu_matrix<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, dimsize, datain, dataout, order_dim_data->gpu_data(), dstdim_capacity_data->gpu_data(), flipdim, dstdimensions_data->gpu_data(), dim_capacity_data->gpu_data()); CUDA_POST_KERNEL_CHECK; return; } template void caffe_gpu_permute<float>(const float * datain, float * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim); template void caffe_gpu_permute<double>(const double * datain, double * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim); template <typename Dtype> __global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = min(max(a[index], min_val), max_val); } } template <typename Dtype> void caffe_gpu_bound(const int N, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { hipLaunchKernelGGL(( bound_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, min_val, max_val, y); } template void caffe_gpu_bound<float>(const int N, const float* a, const float min_val, const float max_val, float* y); template void caffe_gpu_bound<double>(const int N, const double* a, const double min_val, const double max_val, double* y); } // namespace caffe
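Editor's note: caffe_gpu_permute above implements a MATLAB-style permute (with an optional flipped dimension) by precomputing row-major strides for the source and destination shapes and mapping each output index back to a source index. A hedged CPU sketch of the same index arithmetic for a 4-D tensor, to make explicit what permute_gpu_matrix computes per output element:

#include <vector>

// CPU sketch of the index mapping in permute_gpu_matrix for a 4-D tensor.
// dims: source shape; order[j] is the source axis that becomes destination
// axis j; flipdim (or -1) reverses that destination axis.
void permute_cpu(const std::vector<float>& in, std::vector<float>& out,
                 const int dims[4], const int order[4], int flipdim) {
  int srcStride[4], dstDims[4], dstStride[4];
  for (int j = 0; j < 4; ++j) dstDims[j] = dims[order[j]];
  srcStride[3] = dstStride[3] = 1;
  for (int j = 2; j >= 0; --j) {
    srcStride[j] = srcStride[j + 1] * dims[j + 1];       // dim_capacity
    dstStride[j] = dstStride[j + 1] * dstDims[j + 1];    // dstdim_capacity
  }
  const int count = srcStride[0] * dims[0];
  out.resize(count);
  for (int i = 0; i < count; ++i) {
    // Decompose the destination linear index into 4 coordinates.
    int temp = i;
    int dstIdx[4], srcIdx[4];
    for (int j = 0; j < 4; ++j) {
      dstIdx[j] = temp / dstStride[j];
      temp %= dstStride[j];
    }
    if (flipdim >= 0) dstIdx[flipdim] = dstDims[flipdim] - 1 - dstIdx[flipdim];
    // Map back to source coordinates and gather.
    for (int j = 0; j < 4; ++j) srcIdx[order[j]] = dstIdx[j];
    int src = 0;
    for (int j = 0; j < 4; ++j) src += srcIdx[j] * srcStride[j];
    out[i] = in[src];
  }
}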
8aa4437efd39e9ea2965d06ac59b4d09ee3ea642.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/blob.hpp" namespace caffe { //sigmoid template <typename Dtype> __global__ void caffe_SigmoidForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } template <typename Dtype> __global__ void caffe_SigmoidBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } template <> void caffe_gpu_sigmoid_forward<float>(int N, const float* bottom, float* top) { caffe_SigmoidForward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_forward<double>(int N, const double* bottom, double* top) { caffe_SigmoidForward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_backward<float>(int N,const float* top_data, const float* top_diff, float* bottom_diff) { caffe_SigmoidBackward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_sigmoid_backward<double>(int N,const double* top_data, const double* top_diff, double* bottom_diff) { caffe_SigmoidBackward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } //relu template <typename Dtype> __global__ void caffe_ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? 
in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void caffe_ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <> void caffe_gpu_relu_forward<float>(int N, const float* bottom, float* top) { caffe_ReLUForward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_forward<double>(int N, const double* bottom, double* top) { caffe_ReLUForward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_backward<float>(int N, const float* top_data,const float* top_diff, float* bottom_diff) { caffe_ReLUBackward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff, 0); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_relu_backward<double>(int N, const double* top_data,const double* top_diff, double* bottom_diff) { caffe_ReLUBackward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff, 0); CUDA_POST_KERNEL_CHECK; } //tanh template <typename Dtype> __global__ void caffe_TanHForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = tanh(in[index]); } } template <typename Dtype> __global__ void caffe_TanHBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } template <> void caffe_gpu_tanh_forward<float>(int N, const float* bottom, float* top) { caffe_TanHForward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_forward<double>(int N, const double* bottom, double* top) { caffe_TanHForward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, bottom, top); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_backward<float>(int N, const float* top_data,const float* top_diff, float* bottom_diff) { caffe_TanHBackward<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_tanh_backward<double>(int N, const double* top_data,const double* top_diff, double* bottom_diff) { caffe_TanHBackward<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, top_diff, top_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); 
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, 
Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } template <typename Dtype> __global__ void permute_gpu_matrix(const int n, const int dimsize, const Dtype* datain, Dtype* dataout, const int *order, const int * dstdim_capacity, const int flipdim, const int * dstdimensions, const int * dim_capacity) { int curidx=0; int idx[4],dstidx[4]; CUDA_KERNEL_LOOP(i, n) //for(int i=0;i<count;i++) { int temp=i; for(int j=0;j<dimsize;j++) { dstidx[j]= temp / dstdim_capacity[j]; temp = temp % dstdim_capacity[j]; } if(flipdim >= 0) { dstidx[flipdim] = dstdimensions[flipdim] - 1 - dstidx[flipdim] ; } for(int j=0;j<dimsize;j++) { idx[order[j]] = dstidx[j]; } curidx=0; for(int j=0;j<dimsize;j++) { curidx += idx[j]* dim_capacity[j]; } dataout[i] = datain[curidx]; } } template <typename Dtype> void caffe_gpu_permute(const Dtype * datain, Dtype * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim) { /* * implement matlab permute, with additional operation. *1. permutation matrix dimensions *2. 
flip given dimension *liangji, 20150113 */ CHECK(dimsize<=4)<<"currently caffe_gpu_permute only support matrix dimensions smaller than 4, got "<<dimsize<<" here."; shared_ptr<Blob <int> > dim_capacity_data(new Blob<int>()); shared_ptr<Blob <int> > dstdimensions_data(new Blob<int>()); shared_ptr<Blob <int> > dstdim_capacity_data(new Blob<int>()); shared_ptr<Blob <int> > order_dim_data(new Blob<int>()); dim_capacity_data->Reshape(1,1,1,dimsize); dstdimensions_data->Reshape(1,1,1,dimsize); dstdim_capacity_data->Reshape(1,1,1,dimsize); order_dim_data->Reshape(1,1,1,dimsize); caffe_set(dimsize, 1, dim_capacity_data->mutable_cpu_data()); caffe_set(dimsize, 0, dstdimensions_data->mutable_cpu_data()); caffe_set(dimsize, 1, dstdim_capacity_data->mutable_cpu_data()); caffe_set(dimsize, 0, order_dim_data->mutable_cpu_data()); caffe_copy(dimsize, order, order_dim_data->mutable_cpu_data()); int *dim_capacity, *dstdimensions,*dstdim_capacity;//,*order_dim; dim_capacity = dim_capacity_data->mutable_cpu_data(); dstdimensions = dstdimensions_data->mutable_cpu_data(); dstdim_capacity = dstdim_capacity_data->mutable_cpu_data(); //order_dim = order_dim_data->mutable_cpu_data(); for(int i = dimsize-2; i>=0; i-- ) { dim_capacity[i]=1; for(int j=i+1;j<dimsize;j++) dim_capacity[i] *= dimensions[j]; } for(int i =0; i<dimsize; i++ ) { dstdimensions[i] = dimensions[order[i]]; } for(int i = dimsize-2; i>=0; i-- ) { dstdim_capacity[i]=1; for(int j=i+1;j<dimsize;j++) dstdim_capacity[i] *= dstdimensions[j]; } int count = dim_capacity[0]* dimensions[0]; permute_gpu_matrix<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, dimsize, datain, dataout, order_dim_data->gpu_data(), dstdim_capacity_data->gpu_data(), flipdim, dstdimensions_data->gpu_data(), dim_capacity_data->gpu_data()); CUDA_POST_KERNEL_CHECK; return; } template void caffe_gpu_permute<float>(const float * datain, float * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim); template void caffe_gpu_permute<double>(const double * datain, double * dataout, const int * dimensions, const int * order, const int dimsize,const int flipdim); template <typename Dtype> __global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = min(max(a[index], min_val), max_val); } } template <typename Dtype> void caffe_gpu_bound(const int N, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { bound_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, min_val, max_val, y); } template void caffe_gpu_bound<float>(const int N, const float* a, const float min_val, const float max_val, float* y); template void caffe_gpu_bound<double>(const int N, const double* a, const double min_val, const double max_val, double* y); } // namespace caffe
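A minimal standalone sketch (it assumes only cuBLAS and the CUDA runtime; none of the names below come from Caffe) of the layout trick behind the caffe_gpu_gemm wrappers at the top of this file: cuBLAS assumes column-major (Fortran) storage, so a row-major product C = A*B is obtained by asking for C^T = B^T * A^T, that is, passing B first and swapping M and N, exactly as the wrappers above do. A CPU reference loop is included to confirm the argument swap.

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const int M = 2, N = 3, K = 4;
  std::vector<float> A(M * K), B(K * N), C(M * N, 0.0f);  // row-major host buffers
  for (int i = 0; i < M * K; ++i) A[i] = float(i + 1);
  for (int i = 0; i < K * N; ++i) B[i] = float(2 * i - 3);

  float *dA, *dB, *dC;
  cudaMalloc(&dA, A.size() * sizeof(float));
  cudaMalloc(&dB, B.size() * sizeof(float));
  cudaMalloc(&dC, C.size() * sizeof(float));
  cudaMemcpy(dA, A.data(), A.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, B.data(), B.size() * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.0f, beta = 0.0f;
  // Row-major C (MxN) occupies the same bytes as column-major C^T (NxM), so we ask
  // cuBLAS for C^T = B^T * A^T: B goes first, and the m/n arguments are N and M.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, dB, N, dA, K, &beta, dC, N);
  cudaMemcpy(C.data(), dC, C.size() * sizeof(float), cudaMemcpyDeviceToHost);

  // Naive row-major reference on the CPU to confirm the layout trick.
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      float ref = 0.0f;
      for (int k = 0; k < K; ++k) ref += A[i * K + k] * B[k * N + j];
      printf("C[%d][%d]: gpu=%g cpu=%g\n", i, j, C[i * N + j], ref);
    }
  }

  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}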
191d882ff0631470bb7773cc883e1467d0d7d894.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to clat2z and zlaset. */ __global__ void clag2z_kernel( int m, int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] )); } } } } /***************************************************************************//** Purpose ------- CLAG2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA COMPLEX array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A COMPLEX_16 array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lag2 *******************************************************************************/ extern "C" void magmablas_clag2z( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); hipLaunchKernelGGL(( clag2z_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, SA, ldsa, A, lda ); }
191d882ff0631470bb7773cc883e1467d0d7d894.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to clat2z and zlaset. */ __global__ void clag2z_kernel( int m, int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] )); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] )); } } } } /***************************************************************************//** Purpose ------- CLAG2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA COMPLEX array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A COMPLEX_16 array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lag2 *******************************************************************************/ extern "C" void magmablas_clag2z( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) ); clag2z_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, SA, ldsa, A, lda ); }
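The .hip and .cu files in the pair above differ almost only in launch syntax. As a rough standalone sketch (the kernel and variable names here are mine, not from MAGMA), this is the mapping hipify applies: a CUDA kernel<<<grid, block, sharedMemBytes, stream>>>(args...) launch becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...), with the argument order preserved.

#include <cuda_runtime.h>
#include <cstdio>

#define BLK 64

// Same single-to-double widening idea as clag2z, reduced to plain 1D vectors.
__global__ void widen_kernel(int n, const float* src, double* dst) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dst[i] = static_cast<double>(src[i]);
}

int main() {
  const int n = 1000;
  float* d_src;
  double* d_dst;
  cudaMalloc(&d_src, n * sizeof(float));
  cudaMalloc(&d_dst, n * sizeof(double));
  cudaMemset(d_src, 0, n * sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  dim3 threads(BLK), grid((n + BLK - 1) / BLK);  // same ceildiv as magma_ceildiv(m, BLK_X)

  // CUDA triple-chevron launch, as in the .cu file above:
  widen_kernel<<<grid, threads, 0, stream>>>(n, d_src, d_dst);
  // hipify rewrites that line into the macro form seen in the .hip file, e.g.:
  //   hipLaunchKernelGGL(( widen_kernel), dim3(grid), dim3(threads), 0, stream, n, d_src, d_dst);

  cudaStreamSynchronize(stream);
  printf("launched %u blocks of %u threads\n", grid.x, threads.x);
  cudaStreamDestroy(stream);
  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}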
96adee14489b161c8fe8b5e053ca746062120632.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // linear algebra subroutines // Ben Cumming @ CSCS #include <iostream> #include <cmath> #include <cstdio> #include "linalg.h" #include "operators.h" #include "stats.h" #include "data.h" #include <rocblas.h> #include <cstdlib> namespace linalg { namespace kernels { // TODO implement the missing linalg kernels __global__ void add_scaled_diff( double *y, const double* x, const double alpha, const double *l, const double *r, const int n) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if(i < n) { y[i] = x[i] + alpha * (l[i] - r[i]); } } __global__ void copy(double *y, const double* x, int n) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if(i < n) { y[i] = x[i]; } } __global__ void fill(double *x, const double value, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ x[i] = value; i += idmax; } } __global__ void scaled_diff(double *y, const double alpha, const double* l, const double* r, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*(l[i]-r[i]); i += idmax; } } __global__ void scale(double *y, const double alpha, const double* x, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*x[i]; i += idmax; } } __global__ void lcomb(double *y, const double alpha, const double* x, const double beta, const double* z, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*x[i]+beta*z[i]; i += idmax; } } } // namespace kernels bool cg_initialized = false; Field r; Field Ap; Field p; Field Fx; Field Fxold; Field v; Field xold; // block dimensions for blas 1 calls const int block_dim = 192; int calculate_grid_dim(const int block_dim, int n) { return (n-1)/block_dim + 1; } using namespace operators; using namespace stats; using data::Field; // initialize temporary storage fields used by the cg solver // I do this here so that the fields are persistent between calls // to the CG solver. 
This is useful if we want to avoid malloc/free calls // on the device for the OpenACC implementation void cg_init(int nx, int ny) { Ap.init(nx,ny); r.init(nx,ny); p.init(nx,ny); Fx.init(nx,ny); Fxold.init(nx,ny); v.init(nx,ny); xold.init(nx,ny); cg_initialized = true; } //////////////////////////////////////////////////////////////////////////////// // blas level 1 reductions //////////////////////////////////////////////////////////////////////////////// // TODO implement the dot product with cublas // HINT : use cublas_handle() to get the cublas handle // computes the inner product of x and y // x and y are vectors double ss_dot(Field const& x, Field const& y) { hipblasHandle_t handle = cublas_handle(); double result = 0.; const int n = x.length(); hipblasDdot(handle,n,x.device_data(),1,y.device_data(),1, &result); return result; } // TODO : implement the dot product with cublas // HINT : use cublas_handle() to get the cublas handle // computes the 2-norm of x // x is a vector double ss_norm2(Field const& x) { hipblasHandle_t handle = cublas_handle(); double result = 0; const int n = x.length(); hipblasDnrm2(handle,n,x.device_data(),1, &result); return result; } //////////////////////////////////////////////////////////////////////////////// // blas level 1 vector-vector operations //////////////////////////////////////////////////////////////////////////////// // computes y = x + alpha*(l-r) // y, x, l and r are vectors // alpha is a scalar void ss_add_scaled_diff(Field& y, Field const& x, const double alpha, Field const& l, Field const& r) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::add_scaled_diff), dim3(grid_dim), dim3(block_dim), 0, 0, y.device_data(), x.device_data(), alpha, l.device_data(), r.device_data(), n); } // copy one vector into another y := x // x and y are vectors of length N void ss_copy(Field& y, Field const& x) { const int n = x.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::copy), dim3(grid_dim), dim3(block_dim), 0, 0, y.device_data(), x.device_data(), n); } // TODO : implement the wrappers for // ss_fill // ss_axpy // ss_scaled_diff // ss_scale // ss_lcomb // sets x := value // x is a vector // value is a scalar void ss_fill(Field& x, const double value) { const int n = x.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::fill), dim3(grid_dim), dim3(block_dim), 0, 0, x.device_data(), value, n, grid_dim*block_dim); } // computes y := alpha*x + y // x and y are vectors // alpha is a scalar void ss_axpy(Field& y, const double alpha, Field const& x) { hipblasHandle_t handle = cublas_handle(); const int n = x.length(); hipblasDaxpy(handle, n, &alpha, x.device_data(), 1, y.device_data(),1); } // computes y = alpha*(l-r) // y, l and r are vectors of length N // alpha is a scalar void ss_scaled_diff(Field& y, const double alpha, Field const& l, Field const& r) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::scaled_diff), dim3(grid_dim), dim3(block_dim), 0, 0, y.device_data(), alpha, l.device_data(), r.device_data(), n, grid_dim*block_dim); } // computes y := alpha*x // alpha is scalar // y and x are vectors void ss_scale(Field& y, const double alpha, Field& x) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::scale), dim3(grid_dim), dim3(block_dim), 0, 0, y.device_data(), alpha, x.device_data(), n, 
grid_dim*block_dim); } // computes linear combination of two vectors y := alpha*x + beta*z // alpha and beta are scalar // y, x and z are vectors void ss_lcomb(Field& y, const double alpha, Field& x, const double beta, Field const& z) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); hipLaunchKernelGGL(( kernels::lcomb), dim3(grid_dim), dim3(block_dim), 0, 0, y.device_data(), alpha, x.device_data(), beta, z.device_data(), n, grid_dim*block_dim); } // conjugate gradient solver // solve the linear system A*x = b for x // the matrix A is implicit in the objective function for the diffusion equation // the value in x constitute the "first guess" at the solution // x(N) // ON ENTRY contains the initial guess for the solution // ON EXIT contains the solution void ss_cg(Field& x, Field const& b, const int maxiters, const double tol, bool& success) { // this is the dimension of the linear system that we are to solve int nx = data::options.nx; int ny = data::options.ny; if(!cg_initialized) { cg_init(nx,ny); } // epsilon value use for matrix-vector approximation double eps = 1.e-8; double eps_inv = 1. / eps; // initialize memory for temporary storage ss_fill(Fx, 0.0); ss_fill(Fxold, 0.0); ss_copy(xold, x); // matrix vector multiplication is approximated with // A*v = 1/epsilon * ( F(x+epsilon*v) - F(x) ) // = 1/epsilon * ( F(x+epsilon*v) - Fxold ) // we compute Fxold at startup // we have to keep x so that we can compute the F(x+exps*v) diffusion(x, Fxold); // v = x + epsilon*x ss_scale(v, 1.0 + eps, x); // Fx = F(v) diffusion(v, Fx); // r = b - A*x // where A*x = (Fx-Fxold)/eps ss_add_scaled_diff(r, b, -eps_inv, Fx, Fxold); // p = r ss_copy(p, r); // rold = <r,r> double rold = ss_dot(r, r); double rnew = rold; // check for convergence success = sqrt(rold) < tol; if (success) { return; } int iter; for(iter=0; iter<maxiters; iter++) { // Ap = A*p ss_lcomb(v, 1.0, xold, eps, p); diffusion(v, Fx); ss_scaled_diff(Ap, eps_inv, Fx, Fxold); // alpha = rold / p'*Ap double alpha = rold / ss_dot(p, Ap); // x += alpha*p ss_axpy(x, alpha, p); // r -= alpha*Ap ss_axpy(r, -alpha, Ap); // find new norm rnew = ss_dot(r, r); // test for convergence if (sqrt(rnew) < tol) { success = true; break; } // p = r + (rnew/rold) * p ss_lcomb(p, 1.0, r, rnew / rold, p); rold = rnew; } stats::iters_cg += iter + 1; if (!success) { std::cerr << "ERROR: CG failed to converge after " << iter << " iterations, with residual " << sqrt(rnew) << std::endl; } } } // namespace linalg
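The fill/scale/scaled_diff/lcomb kernels above use a grid-stride loop and pass the total thread count in as idmax. Below is a minimal standalone sketch (the names are mine, not from the mini-app) of the same pattern with the stride computed inside the kernel from blockDim.x * gridDim.x, which keeps the launch correct for any grid size, including one smaller than (n-1)/block_dim + 1.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill_grid_stride(double* x, double value, int n) {
  // Each thread walks the array in strides of the total number of launched threads.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    x[i] = value;
  }
}

int main() {
  const int n = 1 << 20;
  double* d_x;
  cudaMalloc(&d_x, n * sizeof(double));

  const int block_dim = 192;  // same block size as the solver above
  const int grid_dim = 128;   // deliberately smaller than one thread per element
  fill_grid_stride<<<grid_dim, block_dim>>>(d_x, 1.0, n);
  cudaDeviceSynchronize();

  double first = 0.0;
  cudaMemcpy(&first, d_x, sizeof(double), cudaMemcpyDeviceToHost);
  printf("x[0] = %f\n", first);
  cudaFree(d_x);
  return 0;
}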
96adee14489b161c8fe8b5e053ca746062120632.cu
// linear algebra subroutines // Ben Cumming @ CSCS #include <iostream> #include <cmath> #include <cstdio> #include "linalg.h" #include "operators.h" #include "stats.h" #include "data.h" #include <cublas_v2.h> #include <cstdlib> namespace linalg { namespace kernels { // TODO implement the missing linalg kernels __global__ void add_scaled_diff( double *y, const double* x, const double alpha, const double *l, const double *r, const int n) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if(i < n) { y[i] = x[i] + alpha * (l[i] - r[i]); } } __global__ void copy(double *y, const double* x, int n) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if(i < n) { y[i] = x[i]; } } __global__ void fill(double *x, const double value, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ x[i] = value; i += idmax; } } __global__ void scaled_diff(double *y, const double alpha, const double* l, const double* r, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*(l[i]-r[i]); i += idmax; } } __global__ void scale(double *y, const double alpha, const double* x, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*x[i]; i += idmax; } } __global__ void lcomb(double *y, const double alpha, const double* x, const double beta, const double* z, int n, int idmax){ int i = threadIdx.x + blockDim.x*blockIdx.x; while(i<n){ y[i] = alpha*x[i]+beta*z[i]; i += idmax; } } } // namespace kernels bool cg_initialized = false; Field r; Field Ap; Field p; Field Fx; Field Fxold; Field v; Field xold; // block dimensions for blas 1 calls const int block_dim = 192; int calculate_grid_dim(const int block_dim, int n) { return (n-1)/block_dim + 1; } using namespace operators; using namespace stats; using data::Field; // initialize temporary storage fields used by the cg solver // I do this here so that the fields are persistent between calls // to the CG solver. 
This is useful if we want to avoid malloc/free calls // on the device for the OpenACC implementation void cg_init(int nx, int ny) { Ap.init(nx,ny); r.init(nx,ny); p.init(nx,ny); Fx.init(nx,ny); Fxold.init(nx,ny); v.init(nx,ny); xold.init(nx,ny); cg_initialized = true; } //////////////////////////////////////////////////////////////////////////////// // blas level 1 reductions //////////////////////////////////////////////////////////////////////////////// // TODO implement the dot product with cublas // HINT : use cublas_handle() to get the cublas handle // computes the inner product of x and y // x and y are vectors double ss_dot(Field const& x, Field const& y) { cublasHandle_t handle = cublas_handle(); double result = 0.; const int n = x.length(); cublasDdot(handle,n,x.device_data(),1,y.device_data(),1, &result); return result; } // TODO : implement the dot product with cublas // HINT : use cublas_handle() to get the cublas handle // computes the 2-norm of x // x is a vector double ss_norm2(Field const& x) { cublasHandle_t handle = cublas_handle(); double result = 0; const int n = x.length(); cublasDnrm2(handle,n,x.device_data(),1, &result); return result; } //////////////////////////////////////////////////////////////////////////////// // blas level 1 vector-vector operations //////////////////////////////////////////////////////////////////////////////// // computes y = x + alpha*(l-r) // y, x, l and r are vectors // alpha is a scalar void ss_add_scaled_diff(Field& y, Field const& x, const double alpha, Field const& l, Field const& r) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::add_scaled_diff<<<grid_dim, block_dim>>> (y.device_data(), x.device_data(), alpha, l.device_data(), r.device_data(), n); } // copy one vector into another y := x // x and y are vectors of length N void ss_copy(Field& y, Field const& x) { const int n = x.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::copy<<<grid_dim, block_dim>>> (y.device_data(), x.device_data(), n); } // TODO : implement the wrappers for // ss_fill // ss_axpy // ss_scaled_diff // ss_scale // ss_lcomb // sets x := value // x is a vector // value is a scalar void ss_fill(Field& x, const double value) { const int n = x.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::fill<<<grid_dim, block_dim>>>(x.device_data(), value, n, grid_dim*block_dim); } // computes y := alpha*x + y // x and y are vectors // alpha is a scalar void ss_axpy(Field& y, const double alpha, Field const& x) { cublasHandle_t handle = cublas_handle(); const int n = x.length(); cublasDaxpy(handle, n, &alpha, x.device_data(), 1, y.device_data(),1); } // computes y = alpha*(l-r) // y, l and r are vectors of length N // alpha is a scalar void ss_scaled_diff(Field& y, const double alpha, Field const& l, Field const& r) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::scaled_diff<<<grid_dim, block_dim>>>(y.device_data(), alpha, l.device_data(), r.device_data(), n, grid_dim*block_dim); } // computes y := alpha*x // alpha is scalar // y and x are vectors void ss_scale(Field& y, const double alpha, Field& x) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::scale<<<grid_dim, block_dim>>>(y.device_data(), alpha, x.device_data(), n, grid_dim*block_dim); } // computes linear combination of two vectors y := alpha*x + beta*z // alpha and beta are scalar // y, x and z are vectors void ss_lcomb(Field& y, const double alpha, Field& x, const 
double beta, Field const& z) { const int n = y.length(); auto grid_dim = calculate_grid_dim(block_dim, n); kernels::lcomb<<<grid_dim, block_dim>>>(y.device_data(), alpha, x.device_data(), beta, z.device_data(), n, grid_dim*block_dim); } // conjugate gradient solver // solve the linear system A*x = b for x // the matrix A is implicit in the objective function for the diffusion equation // the value in x constitute the "first guess" at the solution // x(N) // ON ENTRY contains the initial guess for the solution // ON EXIT contains the solution void ss_cg(Field& x, Field const& b, const int maxiters, const double tol, bool& success) { // this is the dimension of the linear system that we are to solve int nx = data::options.nx; int ny = data::options.ny; if(!cg_initialized) { cg_init(nx,ny); } // epsilon value use for matrix-vector approximation double eps = 1.e-8; double eps_inv = 1. / eps; // initialize memory for temporary storage ss_fill(Fx, 0.0); ss_fill(Fxold, 0.0); ss_copy(xold, x); // matrix vector multiplication is approximated with // A*v = 1/epsilon * ( F(x+epsilon*v) - F(x) ) // = 1/epsilon * ( F(x+epsilon*v) - Fxold ) // we compute Fxold at startup // we have to keep x so that we can compute the F(x+exps*v) diffusion(x, Fxold); // v = x + epsilon*x ss_scale(v, 1.0 + eps, x); // Fx = F(v) diffusion(v, Fx); // r = b - A*x // where A*x = (Fx-Fxold)/eps ss_add_scaled_diff(r, b, -eps_inv, Fx, Fxold); // p = r ss_copy(p, r); // rold = <r,r> double rold = ss_dot(r, r); double rnew = rold; // check for convergence success = sqrt(rold) < tol; if (success) { return; } int iter; for(iter=0; iter<maxiters; iter++) { // Ap = A*p ss_lcomb(v, 1.0, xold, eps, p); diffusion(v, Fx); ss_scaled_diff(Ap, eps_inv, Fx, Fxold); // alpha = rold / p'*Ap double alpha = rold / ss_dot(p, Ap); // x += alpha*p ss_axpy(x, alpha, p); // r -= alpha*Ap ss_axpy(r, -alpha, Ap); // find new norm rnew = ss_dot(r, r); // test for convergence if (sqrt(rnew) < tol) { success = true; break; } // p = r + (rnew/rold) * p ss_lcomb(p, 1.0, r, rnew / rold, p); rold = rnew; } stats::iters_cg += iter + 1; if (!success) { std::cerr << "ERROR: CG failed to converge after " << iter << " iterations, with residual " << sqrt(rnew) << std::endl; } } } // namespace linalg
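ss_dot and ss_norm2 above hand cuBLAS a host pointer for the scalar result. A small standalone sketch (a local handle stands in for the mini-app's cublas_handle()): with the default CUBLAS_POINTER_MODE_HOST, the scalar is written to host memory and the call blocks until it is ready, which is why the wrappers can return result immediately after the call.

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const int n = 4;
  std::vector<double> h_x = {1.0, 2.0, 3.0, 4.0};
  double* d_x;
  cudaMalloc(&d_x, n * sizeof(double));
  cudaMemcpy(d_x, h_x.data(), n * sizeof(double), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);  // the default, shown explicitly

  double dot = 0.0, nrm = 0.0;
  cublasDdot(handle, n, d_x, 1, d_x, 1, &dot);  // <x,x> = 30
  cublasDnrm2(handle, n, d_x, 1, &nrm);         // ||x||_2 = sqrt(30)
  printf("dot = %f, nrm2 = %f\n", dot, nrm);

  cublasDestroy(handle);
  cudaFree(d_x);
  return 0;
}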
0e4501f7ad9ed604c71bb96091d2bf371965cf67.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <hip/hip_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <hip/hip_runtime.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) __constant__ float c_Kernel[KERNEL_LENGTH]; __constant__ int gotoGlobal[3]; //__constant__ int gotoTexture[3]; //__constant__ int gotoRead_only[3]; texture<float,1,hipReadModeElementType> texRef; texture<float,1,hipReadModeElementType> tex_Kernel; void setConvolutionKernel(float *h_Kernel) { hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, float *Kernel, int imageW, int imageH, int pitch,int p0,int p1) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; float _temp1; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; //d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { if(p1==0) _temp1=d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]; else if(p1==1) _temp1=tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch); else if(p1==3) _temp1=__ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]); s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = _temp1;//d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { if(p1==0) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]:0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==1) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch):0; else if(p1==3) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? __ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo //#pragma unroll // printf("%d,%d\n",ROWS_HALO_STEPS,ROWS_HALO_STEPS); for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { if(p1==0) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]:0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==1) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==3) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? __ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { if(p0==0) _temp1=c_Kernel[KERNEL_RADIUS-j]; else if(p0==1) _temp1=tex1Dfetch(tex_Kernel,KERNEL_RADIUS-j); else if(p1==3) _temp1=__ldg(&Kernel[KERNEL_RADIUS-j]); sum += _temp1* s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, float *Kernel, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, Kernel, imageW, imageH, imageW,0,1 ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input,*d_Kernel, *d_Output, *d_Buffer; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(hipMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Kernel , KERNEL_LENGTH * 
sizeof(float))); setConvolutionKernel(h_Kernel); //checkCudaErrors(hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice)); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipArray* cuArray; hipMallocArray(&cuArray, &channelDesc, imageW, imageH); // Copy to device memory some data located at address h_data // in host memory hipMemcpyToArray(cuArray, 0, 0, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_Input,h_Input,imageW*imageH*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_Kernel,h_Kernel,KERNEL_LENGTH*sizeof(float),hipMemcpyHostToDevice); // Set texture reference parameters // texRef.addressMode[0] = hipAddressModeWrap; //texRef.addressMode[1] = hipAddressModeWrap; //texRef.filterMode = hipFilterModePoint; // Bind the array to the texture reference // hipBindTextureToArray(texRef, cuArray, channelDesc); hipBindTexture(0,texRef,d_Input,imageW * imageH * sizeof(float)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); int *Global=(int *)malloc(3*sizeof(int)) ; int *Texture = (int *)malloc(3*sizeof(int)) ; int *Read_only = (int *)malloc(3*sizeof(int)) ; Global[0]=0;Global[1]=2;Global[2]=1; hipMemcpyToSymbol(gotoGlobal,Global,3*sizeof(int)); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t1); } if(i==1) clock_gettime(CLOCK_MONOTONIC,&t1); convolutionRowsGPU( d_Buffer, d_Input,d_Kernel, imageW, imageH ); checkCudaErrors(hipDeviceSynchronize()); if(i==1) clock_gettime(CLOCK_MONOTONIC,&t2); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(hipDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9);/// (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(hipMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(hipFree(d_Buffer)); checkCudaErrors(hipFree(d_Output)); checkCudaErrors(hipFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); hipDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
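The row filter above stages (ROWS_RESULT_STEPS + 2*ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X samples per row of the thread block in shared memory before filtering. Below is a reduced standalone sketch of the same halo-loading idea in 1D, with one output per thread and zero padding at the borders; all names are mine and it is intentionally simpler than the multi-tile kernel in the file.

#include <cuda_runtime.h>
#include <cstdio>

#define RADIUS 8
#define BLOCK 128

__constant__ float c_taps[2 * RADIUS + 1];

__global__ void conv1d_shared(const float* in, float* out, int n) {
  // BLOCK output samples plus RADIUS halo samples on each side.
  __shared__ float tile[BLOCK + 2 * RADIUS];
  int gid = blockIdx.x * BLOCK + threadIdx.x;

  // Main sample for this thread (zero-padded outside the signal).
  tile[RADIUS + threadIdx.x] = (gid < n) ? in[gid] : 0.0f;

  // The first RADIUS threads also load the left and right halos.
  if (threadIdx.x < RADIUS) {
    int left = gid - RADIUS;
    int right = gid + BLOCK;
    tile[threadIdx.x] = (left >= 0) ? in[left] : 0.0f;
    tile[RADIUS + BLOCK + threadIdx.x] = (right < n) ? in[right] : 0.0f;
  }
  __syncthreads();

  if (gid < n) {
    float sum = 0.0f;
    for (int j = -RADIUS; j <= RADIUS; ++j)
      sum += c_taps[RADIUS - j] * tile[RADIUS + threadIdx.x + j];
    out[gid] = sum;
  }
}

int main() {
  const int n = 1 << 16;
  float h_taps[2 * RADIUS + 1];
  for (int i = 0; i < 2 * RADIUS + 1; ++i) h_taps[i] = 1.0f / (2 * RADIUS + 1);

  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));
  cudaMemcpyToSymbol(c_taps, h_taps, sizeof(h_taps));

  conv1d_shared<<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(d_in, d_out, n);
  cudaDeviceSynchronize();
  printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}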
0e4501f7ad9ed604c71bb96091d2bf371965cf67.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <cuda_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <cuda.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) __constant__ float c_Kernel[KERNEL_LENGTH]; __constant__ int gotoGlobal[3]; //__constant__ int gotoTexture[3]; //__constant__ int gotoRead_only[3]; texture<float,1,cudaReadModeElementType> texRef; texture<float,1,cudaReadModeElementType> tex_Kernel; void setConvolutionKernel(float *h_Kernel) { cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, float *Kernel, int imageW, int imageH, int pitch,int p0,int p1) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; float _temp1; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; //d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { if(p1==0) _temp1=d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]; else if(p1==1) _temp1=tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch); else if(p1==3) _temp1=__ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]); s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = _temp1;//d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { if(p1==0) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]:0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==1) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch):0; else if(p1==3) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? __ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo //#pragma unroll // printf("%d,%d\n",ROWS_HALO_STEPS,ROWS_HALO_STEPS); for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { if(p1==0) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]:0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==1) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
tex1Dfetch(texRef,baseX+i*ROWS_BLOCKDIM_X+baseY*pitch):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; else if(p1==3) s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? __ldg(&d_Src[baseX+i*ROWS_BLOCKDIM_X+baseY*pitch]):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { if(p0==0) _temp1=c_Kernel[KERNEL_RADIUS-j]; else if(p0==1) _temp1=tex1Dfetch(tex_Kernel,KERNEL_RADIUS-j); else if(p1==3) _temp1=__ldg(&Kernel[KERNEL_RADIUS-j]); sum += _temp1* s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, float *Kernel, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads>>>( d_Dst, d_Src, Kernel, imageW, imageH, imageW,0,1 ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input,*d_Kernel, *d_Output, *d_Buffer; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Kernel , KERNEL_LENGTH * sizeof(float))); 
setConvolutionKernel(h_Kernel); //checkCudaErrors(cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaArray* cuArray; cudaMallocArray(&cuArray, &channelDesc, imageW, imageH); // Copy to device memory some data located at address h_data // in host memory cudaMemcpyToArray(cuArray, 0, 0, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_Input,h_Input,imageW*imageH*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_Kernel,h_Kernel,KERNEL_LENGTH*sizeof(float),cudaMemcpyHostToDevice); // Set texture reference parameters // texRef.addressMode[0] = cudaAddressModeWrap; //texRef.addressMode[1] = cudaAddressModeWrap; //texRef.filterMode = cudaFilterModePoint; // Bind the array to the texture reference // cudaBindTextureToArray(texRef, cuArray, channelDesc); cudaBindTexture(0,texRef,d_Input,imageW * imageH * sizeof(float)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); int *Global=(int *)malloc(3*sizeof(int)) ; int *Texture = (int *)malloc(3*sizeof(int)) ; int *Read_only = (int *)malloc(3*sizeof(int)) ; Global[0]=0;Global[1]=2;Global[2]=1; cudaMemcpyToSymbol(gotoGlobal,Global,3*sizeof(int)); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t1); } if(i==1) clock_gettime(CLOCK_MONOTONIC,&t1); convolutionRowsGPU( d_Buffer, d_Input,d_Kernel, imageW, imageH ); checkCudaErrors(cudaDeviceSynchronize()); if(i==1) clock_gettime(CLOCK_MONOTONIC,&t2); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(cudaDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9);/// (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(cudaMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(cudaFree(d_Buffer)); checkCudaErrors(cudaFree(d_Output)); checkCudaErrors(cudaFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); cudaDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
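The .cu file above binds texture<float,1,cudaReadModeElementType> references with cudaBindTexture; that texture-reference API is legacy and is rejected by recent CUDA toolkits. A standalone sketch (hypothetical names, not part of the sample) of the texture-object replacement, where the fetch is still tex1Dfetch, now templated on the element type and taking the object as an ordinary kernel argument.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void copy_via_tex(cudaTextureObject_t tex, float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = tex1Dfetch<float>(tex, i);
}

int main() {
  const int n = 1024;
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));

  // Describe the linear device buffer the texture reads from.
  cudaResourceDesc resDesc = {};
  resDesc.resType = cudaResourceTypeLinear;
  resDesc.res.linear.devPtr = d_in;
  resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
  resDesc.res.linear.sizeInBytes = n * sizeof(float);

  cudaTextureDesc texDesc = {};
  texDesc.readMode = cudaReadModeElementType;

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);

  copy_via_tex<<<(n + 255) / 256, 256>>>(tex, d_out, n);
  cudaDeviceSynchronize();
  printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));

  cudaDestroyTextureObject(tex);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}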
f3e3eb3c12d871e2d53fa1f1bbc683bd1392a459.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_tanh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE*sizeof(double)); /* n doubles */ double *x = NULL; hipMalloc(&x, XSIZE*YSIZE*sizeof(double)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_tanh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_tanh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_tanh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f3e3eb3c12d871e2d53fa1f1bbc683bd1392a459.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_tanh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE*sizeof(double)); /* n doubles */ double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE*sizeof(double)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_tanh<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_tanh<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_tanh<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
39cf4f89bb213751bd7178b30a6ad767fa229f0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip" #else accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return val; } void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::lower_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MaxValuePair<scalar_t, int64_t>()); } void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::upper_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MinValuePair<scalar_t, int64_t>()); } scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); THArgCheck( THTensor_(nElement)(self) > 0, 1, "cannot perform reduction function min " "on tensor with no elements because the " "operation does not have an identity" ); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMin<accreal>{}, THCNumerics<accreal>::upper_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return scalar_cast<scalar_t>(val); } scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); THArgCheck( THTensor_(nElement)(self) > 0, 1, "cannot perform reduction function max " "on tensor with no elements because the " "operation does not have an identity" ); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMax<accreal>{}, THCNumerics<accreal>::lower_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return scalar_cast<scalar_t>(val); } #if !defined(THC_REAL_IS_BOOL) void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim<scalar_t>(state, self, src, thrust::identity<accreal>{}, ReduceMultiply<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(1), dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); dimension = at::maybe_wrap_dim(dimension, src); THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, 
"non-positive-norm not supported"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions"); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); int64_t numel = THCTensor_(nElement)(state, data); if (numel > 0) { ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0); dim3 grid( THTensor_sizeLegacyNoScalars(data, 0)); // NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel. dim3 threads(32); hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>) , dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm)); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); } THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased))); } accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll<scalar_t>(state, self, SquareFunctor<accreal>(mean), ReduceAdd<accreal>(), scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 
1 : 0))) ); THCudaCheck(hipGetLastError()); return val; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); self = THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src)); THCThrustAllocator thrustAlloc(state); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), ReduceMax<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY), ReduceMin<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0))); } else { result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(value)); result = THCNumerics<accreal>::pow(result, static_cast<accreal>(1) / value); } THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; } #endif accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } #endif #endif
39cf4f89bb213751bd7178b30a6ad767fa229f0f.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu" #else accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceAdd<accreal>{}, scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return val; } void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::lower_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MaxValuePair<scalar_t, int64_t>()); } void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<scalar_t, int64_t> init = thrust::make_pair<scalar_t, int64_t>( THCNumerics<scalar_t>::upper_bound(), 0); return THC_reduceDimIndex<scalar_t, int64_t>( state, values, indices, src, dimension, keepdim, init, MinValuePair<scalar_t, int64_t>()); } scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); THArgCheck( THTensor_(nElement)(self) > 0, 1, "cannot perform reduction function min " "on tensor with no elements because the " "operation does not have an identity" ); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMin<accreal>{}, THCNumerics<accreal>::upper_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return scalar_cast<scalar_t>(val); } scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); THArgCheck( THTensor_(nElement)(self) > 0, 1, "cannot perform reduction function max " "on tensor with no elements because the " "operation does not have an identity" ); accreal val; if (!THC_reduceAll<scalar_t>(state, self, thrust::identity<accreal>{}, ReduceMax<accreal>{}, THCNumerics<accreal>::lower_bound(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return scalar_cast<scalar_t>(val); } #if !defined(THC_REAL_IS_BOOL) void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim<scalar_t>(state, self, src, thrust::identity<accreal>{}, ReduceMultiply<accreal>{}, thrust::identity<accreal>{}, scalar_cast<accreal>(1), dimension, keepdim)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); dimension = at::maybe_wrap_dim(dimension, src); THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) 
> 1, 1, "need at least 2 dimensions"); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); int64_t numel = THCTensor_(nElement)(state, data); if (numel > 0) { ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0); dim3 grid( THTensor_sizeLegacyNoScalars(data, 0)); // NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel. dim3 threads(32); THCTensor_kernel_renorm<scalar_t, accreal> <<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm)); cudaError_t errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); } THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased))); } accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll<scalar_t>(state, self, SquareFunctor<accreal>(mean), ReduceAdd<accreal>(), scalar_cast<accreal>(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 
1 : 0))) ); THCudaCheck(cudaGetLastError()); return val; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, scalar_t _value) { const accreal value = scalar_cast<accreal>(_value); THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); self = THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src)); THCThrustAllocator thrustAlloc(state); accreal result; if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), ReduceMax<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY), ReduceMin<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1))); } else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0))); } else { result = thrust::inner_product( #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, scalar_cast<accreal>(0), thrust::plus<accreal>(), ThrustTensorDistOp<scalar_t, accreal>(value)); result = THCNumerics<accreal>::pow(result, static_cast<accreal>(1) / value); } THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; } #endif accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } #endif #endif
8825499cf1ecb98e66fe3b45ec3342ceea2bc3c9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include <sys/time.h> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, _test ? 
PASS_TEST : PASS_TRAIN); _convNet->getCost(batchCost); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); } } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
8825499cf1ecb98e66fe3b45ec3342ceea2bc3c9.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include <sys/time.h> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, _test ? 
PASS_TEST : PASS_TRAIN); _convNet->getCost(batchCost); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); } } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
4864cefaaa3f361d65a11c3a265d40183718c88e.hip
// !!! This is a file automatically generated by hipify!!! // clang-format off /************************************************************************************\ * * * Copyright 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. * * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ // clang-format on #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> //#include "../../graph_parser/parse.h" #include <hip/hip_runtime_api.h> #include "../../graph_parser/util.h" #include "../../graph_parser/parse.cpp" #include "../../graph_parser/util.cpp" #include "kernel.hip" // Iteration count #define ITER 20 void print_vectorf(int *vector, int num); int main(int argc, char **argv) { char *tmpchar; int num_nodes; int num_edges; int file_format = 1; bool directed = 0; hipError_t err = hipSuccess; if (argc == 3) { tmpchar = argv[1]; // Graph inputfile file_format = atoi(argv[2]); // File format } else { fprintf(stderr, "You did something wrong!\n"); exit(1); } // Allocate the csr structure csr_array *csr; // Parse graph files into csr structure if (file_format == 1) { // Metis csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed); } else if (file_format == 0) { // Dimacs9 csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed); } else if (file_format == 2) { // Matrix market csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0); } else { printf("reserve for future"); exit(1); } // Allocate rank_array int *rank_array = (int *)malloc(num_nodes * sizeof(int)); if (!rank_array) { fprintf(stderr, "rank array not allocated successfully\n"); return -1; } int *row_d; int *col_d; int *inrow_d; int *incol_d; int *cc_d; // Create device-side buffers for the graph err = hipMalloc(&row_d, num_nodes * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } err = hipMalloc(&col_d, num_edges * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges, hipGetErrorString(err)); return -1; } err = hipMalloc(&inrow_d, num_nodes * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } err = hipMalloc(&incol_d, num_edges * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n", num_edges, hipGetErrorString(err)); return -1; } // Create buffers for cc err = hipMalloc(&cc_d, num_nodes * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } double timer1 = gettime(); // Copy the data to the device-side buffers err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err)); return -1; } // Set up work dimensions int block_size = 256; int num_blocks = (num_nodes 
+ block_size - 1) / block_size; dim3 threads(block_size, 1, 1); dim3 grid(num_blocks, 1, 1); hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); double timer3 = gettime(); VirtVertex<int, int> **vertex; GraphChiContext *context; err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges, hipGetErrorString(err)); return -1; } err = hipMalloc(&context, sizeof(GraphChiContext)); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n", num_edges, hipGetErrorString(err)); return -1; } printf("Start initCtx\n"); hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "ERROR: initCtx failed (%s)\n", hipGetErrorString(err)); return -1; } printf("Start initObj\n"); hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d, incol_d); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "ERROR: initObject failed (%s)\n", hipGetErrorString(err)); return -1; } printf("Start initOutEdge\n"); hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "ERROR: initOutEdge failed (%s)\n", hipGetErrorString(err)); return -1; } // Run CC for some iter. TO: convergence determination for (int i = 0; i < ITER; i++) { printf("Start ConnectedComponent\n"); hipLaunchKernelGGL(( ConnectedComponent), dim3(grid), dim3(threads), 0, 0, vertex, context, i); printf("Finish ConnectedComponent\n"); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err)); return -1; } } hipDeviceSynchronize(); double timer4 = gettime(); printf("Start Copyback\n"); hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d); printf("End Copyback\n"); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err)); return -1; } // Copy the rank buffer back err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n", hipGetErrorString(err)); return -1; } double timer2 = gettime(); // Report timing characteristics printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000); printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000); #if 1 // Print rank array print_vectorf(rank_array, num_nodes); #endif // Free the host-side arrays free(rank_array); csr->freeArrays(); free(csr); // Free the device buffers hipFree(row_d); hipFree(col_d); hipFree(inrow_d); hipFree(incol_d); hipFree(cc_d); return 0; } void print_vectorf(int *vector, int num) { FILE *fp = fopen("result.out", "w"); if (!fp) { printf("ERROR: unable to open result.txt\n"); } for (int i = 0; i < num; i++) { fprintf(fp, "%d\n", vector[i]); } fclose(fp); }
4864cefaaa3f361d65a11c3a265d40183718c88e.cu
// clang-format off /************************************************************************************\ * * * Copyright © 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. * * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ // clang-format on #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> //#include "../../graph_parser/parse.h" #include <cuda_runtime_api.h> #include "../../graph_parser/util.h" #include "../../graph_parser/parse.cpp" #include "../../graph_parser/util.cpp" #include "kernel.cu" // Iteration count #define ITER 20 void print_vectorf(int *vector, int num); int main(int argc, char **argv) { char *tmpchar; int num_nodes; int num_edges; int file_format = 1; bool directed = 0; cudaError_t err = cudaSuccess; if (argc == 3) { tmpchar = argv[1]; // Graph inputfile file_format = atoi(argv[2]); // File format } else { fprintf(stderr, "You did something wrong!\n"); exit(1); } // Allocate the csr structure csr_array *csr; // Parse graph files into csr structure if (file_format == 1) { // Metis csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed); } else if (file_format == 0) { // Dimacs9 csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed); } else if (file_format == 2) { // Matrix market csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0); } else { printf("reserve for future"); exit(1); } // Allocate rank_array int *rank_array = (int *)malloc(num_nodes * sizeof(int)); if (!rank_array) { fprintf(stderr, "rank array not allocated successfully\n"); return -1; } int *row_d; int *col_d; int *inrow_d; int *incol_d; int *cc_d; // Create device-side buffers for the graph err = cudaMalloc(&row_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMalloc(&col_d, num_edges * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err)); return -1; } err = cudaMalloc(&inrow_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMalloc(&incol_d, num_edges * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err)); return -1; } // Create buffers for cc err = cudaMalloc(&cc_d, num_nodes * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } double timer1 = gettime(); // Copy the data to the device-side buffers err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err)); return -1; } // Set up work dimensions int block_size = 256; 
int num_blocks = (num_nodes + block_size - 1) / block_size; dim3 threads(block_size, 1, 1); dim3 grid(num_blocks, 1, 1); cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); double timer3 = gettime(); VirtVertex<int, int> **vertex; GraphChiContext *context; err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_edges, cudaGetErrorString(err)); return -1; } err = cudaMalloc(&context, sizeof(GraphChiContext)); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n", num_edges, cudaGetErrorString(err)); return -1; } printf("Start initCtx\n"); initContext<<<1, 1>>>(context, num_nodes, num_edges); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "ERROR: initCtx failed (%s)\n", cudaGetErrorString(err)); return -1; } printf("Start initObj\n"); initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d, incol_d); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "ERROR: initObject failed (%s)\n", cudaGetErrorString(err)); return -1; } printf("Start initOutEdge\n"); initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "ERROR: initOutEdge failed (%s)\n", cudaGetErrorString(err)); return -1; } // Run CC for some iter. TO: convergence determination for (int i = 0; i < ITER; i++) { printf("Start ConnectedComponent\n"); ConnectedComponent<<<grid, threads>>>(vertex, context, i); printf("Finish ConnectedComponent\n"); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err)); return -1; } } cudaDeviceSynchronize(); double timer4 = gettime(); printf("Start Copyback\n"); copyBack<<<grid, threads>>>(vertex, context, cc_d); printf("End Copyback\n"); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err)); return -1; } // Copy the rank buffer back err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n", cudaGetErrorString(err)); return -1; } double timer2 = gettime(); // Report timing characteristics printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000); printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000); #if 1 // Print rank array print_vectorf(rank_array, num_nodes); #endif // Free the host-side arrays free(rank_array); csr->freeArrays(); free(csr); // Free the device buffers cudaFree(row_d); cudaFree(col_d); cudaFree(inrow_d); cudaFree(incol_d); cudaFree(cc_d); return 0; } void print_vectorf(int *vector, int num) { FILE *fp = fopen("result.out", "w"); if (!fp) { printf("ERROR: unable to open result.txt\n"); } for (int i = 0; i < num; i++) { fprintf(fp, "%d\n", vector[i]); } fclose(fp); }
6d6799f5f1f4ccf983d7b1dcda6b6c9ef1cd4581.hip
// !!! This is a file automatically generated by hipify!!! #include "StiffnessMatrixFirstOrder.h" void StiffnessMatrixFirstOrder::constantCreator(unsigned int numberElement, float* c, float* x, float* y, unsigned int* mesh) { unsigned int i = numberElement*6; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] + x[mesh[numberElement*4+2]] - x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+3]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+1]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] + y[mesh[numberElement*4+2]] - y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+3]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+1]])/4; // defined the constants c1x to c3y }; void StiffnessMatrixFirstOrder::stiffnessMatrixCalculation(unsigned int numberElement, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int* j_index) // numberElement -> the element number needed to be calculated // nip is the number of integration point squared. // in is the integrationNode // ip -> integrationPos // iw -> integrationWeight // c -> constants // D -> material matrix // k -> stiffness matrix { unsigned int counter = 36*(numberElement); for (unsigned int noIP = 0; noIP < nip; noIP++) { // noIP -> the integration point number needed to be calculated double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]]; // Jacobian double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4]; double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5]; double detJ = J11*J22-J12*J21; double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ; // derveativs of the shape function N1x N2x ... N1y N2y ... double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \ J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \ J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \ J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4}; // multiplication of shape functions N1x^2 N1x*N2x .... 
double N[36]; unsigned int counterN = 0; for (unsigned int i = 0; i < 8; i++) { for (unsigned int j = i; j < 8 ; j++) N[counterN++] = Ni[i]*Ni[j]; }; // find the position to start filling the stiffness matrix // writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry k[counter+0] = k[counter+0] + WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]); k[counter+1] = k[counter+1] + WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]); k[counter+2] = k[counter+2] + WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]); k[counter+3] = k[counter+3] + WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]); k[counter+4] = k[counter+4] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]); k[counter+5] = k[counter+5] + WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]); k[counter+6] = k[counter+6] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]); k[counter+7] = k[counter+7] + WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]); k[counter+8] = k[counter+8] + WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]); k[counter+9] = k[counter+9] + WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]); k[counter+10] = k[counter+10] + WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]); k[counter+11] = k[counter+11] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]); k[counter+12] = k[counter+12] + WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]); k[counter+13] = k[counter+13] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]); k[counter+14] = k[counter+14] + WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]); k[counter+15] = k[counter+15] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]); k[counter+16] = k[counter+16] + WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]); k[counter+17] = k[counter+17] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]); k[counter+18] = k[counter+18] + WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]); k[counter+19] = k[counter+19] + WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]); k[counter+20] = k[counter+20] + WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]); k[counter+21] = k[counter+21] + WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]); k[counter+22] = k[counter+22] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]); k[counter+23] = k[counter+23] + WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]); k[counter+24] = k[counter+24] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]); k[counter+25] = k[counter+25] + WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]); k[counter+26] = k[counter+26] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]); k[counter+27] = k[counter+27] + WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]); k[counter+28] = k[counter+28] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]); k[counter+29] = k[counter+29] + WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]); k[counter+30] = k[counter+30] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]); k[counter+31] = k[counter+31] + WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]); k[counter+32] = k[counter+32] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]); k[counter+33] = k[counter+33] + WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]); k[counter+34] = k[counter+34] + 
WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]); k[counter+35] = k[counter+35] + WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]); } unsigned int count = counter; unsigned int xi, xj, yi, yj; unsigned int dof_i, dof_j; for (unsigned int i = 0; i<8; i++) for (unsigned int j = 0; j<i+1; j++) { xi = i/2; yi = i%2-1; xj = j/2; yj = j%2-1; dof_i = (mesh[numberElement*4+xi]+1)*2+yi; dof_j = (mesh[numberElement*4+xj]+1)*2+yj; i_index[count] = max(dof_i,dof_j); j_index[count++] = min(dof_i,dof_j); } /* unsigned int count = counter; for (unsigned int i = 0; i<4; i++) { for (unsigned int j = 0; j<i+1; j++) { for (unsigned int k = 1; k < 3; k++) { dof_i = (mesh[numberElement*4+i])*2+k; dof_j = (mesh[numberElement*4+j])*2+k; } } } */ } Sparse* StiffnessMatrixFirstOrder::assembler(Sparse* k, unsigned int* freeDofs, unsigned int freeDofSize) { unsigned int kg_size = k->get_numberOfRows(); Sparse* K = new Sparse(kg_size*kg_size,kg_size); unsigned int posGlobal; // assemble value int totalElement = k->get_valueSize()/36; unsigned int totalDOF = k->get_numberOfRows(); int counter = 0; for (unsigned int nOfE = 0; nOfE< totalElement; nOfE++) { counter = nOfE*36; for (unsigned int c = counter; c < counter+36; c++) { posGlobal = totalDOF*(k->i[c]-1)+k->j[c]-1; K->value[posGlobal] = K->value[posGlobal] + k->value[c]; K->i[posGlobal] = k->i[c]; K->j[posGlobal] = k->j[c]; if (k->j[c] != k->i[c]) { posGlobal = totalDOF*(k->j[c]-1)+k->i[c]-1; K->value[posGlobal] = K->value[posGlobal] + k->value[c]; K->i[posGlobal] = k->i[c]; K->j[posGlobal] = k->j[c]; } } } /* // assemble Dofs counter = 0; for (unsigned int i = 0; i < freeDofSize; i++) { for (unsigned int j = 0; j < freeDofSize; j++) { K->i[counter] = freeDofs[i]; K->j[counter] = freeDofs[j]; K->value[counter] = Ktemp[totalNode*2*(K->i[counter]-1)+K->j[counter]-1]; //Log::Logger().Info(totalNode*2*(K->i[counter]-1)+K->j[counter]-1); counter++; } } */ delete k; //delete[] Ktemp; return K; } StiffnessMatrixFirstOrder::StiffnessMatrixFirstOrder(Material& mat, Geometry& geo, unsigned int n) :StiffnessMatrix(mat,geo,n) { Log::Logger().Info("StiffnessMatrixFirstOrder Created by CPU"); sizeStiffMatPerEle = 36; stiffMatSize = numberOfElements*sizeStiffMatPerEle; globalStiffMatSize = globalStiffMatSizeCalculator(geometry->get_mesh(), geometry->get_mesh_Size(), geometry->get_x_y_size()); simulationSize = numberOfElements; stiffMat = new Sparse(stiffMatSize,geometry->get_x_y_size()*2); hipMallocManaged(&c,numberOfElements*6*sizeof(float)); }; StiffnessMatrixFirstOrder::~StiffnessMatrixFirstOrder() { Log::Logger().Info("StiffnessMatrixFirstOrder Deleted by CPU"); hipFree(c); } int StiffnessMatrixFirstOrder::GetStiffnessMatrixSize() { return stiffMatSize; }
6d6799f5f1f4ccf983d7b1dcda6b6c9ef1cd4581.cu
#include "StiffnessMatrixFirstOrder.h" void StiffnessMatrixFirstOrder::constantCreator(unsigned int numberElement, float* c, float* x, float* y, unsigned int* mesh) { unsigned int i = numberElement*6; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] + x[mesh[numberElement*4+2]] - x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+3]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+1]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] + y[mesh[numberElement*4+2]] - y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+3]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+1]])/4; // defined the constants c1x to c3y }; void StiffnessMatrixFirstOrder::stiffnessMatrixCalculation(unsigned int numberElement, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int* j_index) // numberElement -> the element number needed to be calculated // nip is the number of integration point squared. // in is the integrationNode // ip -> integrationPos // iw -> integrationWeight // c -> constants // D -> material matrix // k -> stiffness matrix { unsigned int counter = 36*(numberElement); for (unsigned int noIP = 0; noIP < nip; noIP++) { // noIP -> the integration point number needed to be calculated double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]]; // Jacobian double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4]; double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5]; double detJ = J11*J22-J12*J21; double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ; // derveativs of the shape function N1x N2x ... N1y N2y ... double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \ J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \ J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \ J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4}; // multiplication of shape functions N1x^2 N1x*N2x .... 
double N[36]; unsigned int counterN = 0; for (unsigned int i = 0; i < 8; i++) { for (unsigned int j = i; j < 8 ; j++) N[counterN++] = Ni[i]*Ni[j]; }; // find the position to start filling the stiffness matrix // writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry k[counter+0] = k[counter+0] + WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]); k[counter+1] = k[counter+1] + WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]); k[counter+2] = k[counter+2] + WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]); k[counter+3] = k[counter+3] + WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]); k[counter+4] = k[counter+4] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]); k[counter+5] = k[counter+5] + WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]); k[counter+6] = k[counter+6] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]); k[counter+7] = k[counter+7] + WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]); k[counter+8] = k[counter+8] + WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]); k[counter+9] = k[counter+9] + WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]); k[counter+10] = k[counter+10] + WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]); k[counter+11] = k[counter+11] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]); k[counter+12] = k[counter+12] + WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]); k[counter+13] = k[counter+13] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]); k[counter+14] = k[counter+14] + WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]); k[counter+15] = k[counter+15] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]); k[counter+16] = k[counter+16] + WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]); k[counter+17] = k[counter+17] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]); k[counter+18] = k[counter+18] + WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]); k[counter+19] = k[counter+19] + WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]); k[counter+20] = k[counter+20] + WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]); k[counter+21] = k[counter+21] + WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]); k[counter+22] = k[counter+22] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]); k[counter+23] = k[counter+23] + WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]); k[counter+24] = k[counter+24] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]); k[counter+25] = k[counter+25] + WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]); k[counter+26] = k[counter+26] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]); k[counter+27] = k[counter+27] + WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]); k[counter+28] = k[counter+28] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]); k[counter+29] = k[counter+29] + WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]); k[counter+30] = k[counter+30] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]); k[counter+31] = k[counter+31] + WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]); k[counter+32] = k[counter+32] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]); k[counter+33] = k[counter+33] + WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]); k[counter+34] = k[counter+34] + 
WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]); k[counter+35] = k[counter+35] + WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]); } unsigned int count = counter; unsigned int xi, xj, yi, yj; unsigned int dof_i, dof_j; for (unsigned int i = 0; i<8; i++) for (unsigned int j = 0; j<i+1; j++) { xi = i/2; yi = i%2-1; xj = j/2; yj = j%2-1; dof_i = (mesh[numberElement*4+xi]+1)*2+yi; dof_j = (mesh[numberElement*4+xj]+1)*2+yj; i_index[count] = max(dof_i,dof_j); j_index[count++] = min(dof_i,dof_j); } /* unsigned int count = counter; for (unsigned int i = 0; i<4; i++) { for (unsigned int j = 0; j<i+1; j++) { for (unsigned int k = 1; k < 3; k++) { dof_i = (mesh[numberElement*4+i])*2+k; dof_j = (mesh[numberElement*4+j])*2+k; } } } */ } Sparse* StiffnessMatrixFirstOrder::assembler(Sparse* k, unsigned int* freeDofs, unsigned int freeDofSize) { unsigned int kg_size = k->get_numberOfRows(); Sparse* K = new Sparse(kg_size*kg_size,kg_size); unsigned int posGlobal; // assemble value int totalElement = k->get_valueSize()/36; unsigned int totalDOF = k->get_numberOfRows(); int counter = 0; for (unsigned int nOfE = 0; nOfE< totalElement; nOfE++) { counter = nOfE*36; for (unsigned int c = counter; c < counter+36; c++) { posGlobal = totalDOF*(k->i[c]-1)+k->j[c]-1; K->value[posGlobal] = K->value[posGlobal] + k->value[c]; K->i[posGlobal] = k->i[c]; K->j[posGlobal] = k->j[c]; if (k->j[c] != k->i[c]) { posGlobal = totalDOF*(k->j[c]-1)+k->i[c]-1; K->value[posGlobal] = K->value[posGlobal] + k->value[c]; K->i[posGlobal] = k->i[c]; K->j[posGlobal] = k->j[c]; } } } /* // assemble Dofs counter = 0; for (unsigned int i = 0; i < freeDofSize; i++) { for (unsigned int j = 0; j < freeDofSize; j++) { K->i[counter] = freeDofs[i]; K->j[counter] = freeDofs[j]; K->value[counter] = Ktemp[totalNode*2*(K->i[counter]-1)+K->j[counter]-1]; //Log::Logger().Info(totalNode*2*(K->i[counter]-1)+K->j[counter]-1); counter++; } } */ delete k; //delete[] Ktemp; return K; } StiffnessMatrixFirstOrder::StiffnessMatrixFirstOrder(Material& mat, Geometry& geo, unsigned int n) :StiffnessMatrix(mat,geo,n) { Log::Logger().Info("StiffnessMatrixFirstOrder Created by CPU"); sizeStiffMatPerEle = 36; stiffMatSize = numberOfElements*sizeStiffMatPerEle; globalStiffMatSize = globalStiffMatSizeCalculator(geometry->get_mesh(), geometry->get_mesh_Size(), geometry->get_x_y_size()); simulationSize = numberOfElements; stiffMat = new Sparse(stiffMatSize,geometry->get_x_y_size()*2); cudaMallocManaged(&c,numberOfElements*6*sizeof(float)); }; StiffnessMatrixFirstOrder::~StiffnessMatrixFirstOrder() { Log::Logger().Info("StiffnessMatrixFirstOrder Deleted by CPU"); cudaFree(c); } int StiffnessMatrixFirstOrder::GetStiffnessMatrixSize() { return stiffMatSize; }
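stiffnessMatrixCalculation above expects pre-tabulated integration data: in (node positions), iw (weights), ip (per-point index pairs) and nip (number of points), indexed as in[ip[2*noIP]] and iw[ip[2*noIP]]. A 2x2 Gauss-Legendre setup consistent with that indexing is sketched below purely as an illustration; the arrays the class actually builds are defined elsewhere in the project, so the exact names and values here are assumptions.

// Hypothetical 2x2 Gauss-Legendre tables matching the indexing used by
// stiffnessMatrixCalculation (XI = in[ip[2*noIP]], YI = in[ip[2*noIP+1]],
// weight = iw[ip[2*noIP]] * iw[ip[2*noIP+1]]).
const float        in[2] = { -0.5773502692f, 0.5773502692f };  // +/- 1/sqrt(3)
const float        iw[2] = { 1.0f, 1.0f };                     // Gauss weights
const unsigned int ip[8] = { 0, 0,   0, 1,   1, 0,   1, 1 };   // (xi, eta) index pair per point
const unsigned int nip   = 4;                                  // 2x2 = 4 integration points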
bc0355b839595daf4ccdaea65bf96e6d0bdfcd49.hip
// !!! This is a file automatically generated by hipify!!! /* ** PROGRAM: heat equation solve ** ** PURPOSE: This program will explore use of an explicit ** finite difference method to solve the heat ** equation under a method of manufactured solution (MMS) ** scheme. The solution has been set to be a simple ** function based on exponentials and trig functions. ** ** A finite difference scheme is used on a 1000x1000 cube. ** A total of 0.5 units of time are simulated. ** ** The MMS solution has been adapted from ** G.W. Recktenwald (2011). Finite difference approximations ** to the Heat Equation. Portland State University. ** ** ** USAGE: Run with two arguments: ** First is the number of cells. ** Second is the number of timesteps. ** ** For example, with 100x100 cells and 10 steps: ** ** ./heat 100 10 ** ** ** HISTORY: ** Ported to SYCL by Tom Deakin, Nov 2019 ** Ported to OpenCL by Tom Deakin, Jan 2020 ** Ported to CUDA by Zheming Jin, June 2020 ** */ #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <hip/hip_runtime.h> // Key constants used in this program #define PI acos(-1.0) // Pi #define LINE "--------------------" // A line for fancy output // Function definitions __global__ void initial_value(const unsigned int n, const double dx, const double length, double *u); __global__ void zero(const unsigned int n, double *u); __global__ void solve(const unsigned int n, const double alpha, const double dx, const double dt, double *__restrict__ u, double *__restrict__ u_tmp); double solution(const double t, const double x, const double y, const double alpha, const double length); double l2norm(const unsigned int n, const double *u, const int nsteps, const double dt, const double alpha, const double dx, const double length); // Main function int main(int argc, char *argv[]) { // Start the total program runtime timer auto start = std::chrono::high_resolution_clock::now(); // Problem size, forms an nxn grid int n = 1000; // Number of timesteps int nsteps = 10; // Check for the correct number of arguments // Print usage and exits if not correct if (argc == 3) { // Set problem size from first argument n = atoi(argv[1]); if (n < 0) { std::cerr << "Error: n must be positive" << std::endl; exit(EXIT_FAILURE); } // Set number of timesteps from second argument nsteps = atoi(argv[2]); if (nsteps < 0) { std::cerr << "Error: nsteps must be positive" << std::endl; exit(EXIT_FAILURE); } } // // Set problem definition // double alpha = 0.1; // heat equation coefficient double length = 1000.0; // physical size of domain: length x length square double dx = length / (n + 1); // physical size of each cell (+1 as don't // simulate boundaries as they are given) double dt = 0.5 / nsteps; // time interval (total time of 0.5s) // Stability requires that dt/(dx^2) <= 0.5, double r = alpha * dt / (dx * dx); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); char *device_name = prop.name; // Print message detailing runtime configuration std::cout << std::endl << " MMS heat equation" << std::endl << std::endl << LINE << std::endl << "Problem input" << std::endl << std::endl << " Grid size: " << n << " x " << n << std::endl << " Cell width: " << dx << std::endl << " Grid length: " << length << "x" << length << std::endl << std::endl << " Alpha: " << alpha << std::endl << std::endl << " Steps: " << nsteps << std::endl << " Total time: " << dt * (double)nsteps << std::endl << " Time step: " << dt << std::endl << " GPU device: " << device_name << std::endl << LINE << std::endl; // 
Stability check std::cout << "Stability" << std::endl << std::endl; std::cout << " r value: " << r << std::endl; if (r > 0.5) std::cout << " Warning: unstable" << std::endl; std::cout << LINE << std::endl; // Allocate two nxn grids double *u; double *u_tmp; hipMalloc((void **)&u, sizeof(double) * n * n); hipMalloc((void **)&u_tmp, sizeof(double) * n * n); // Set the initial value of the grid under the MMS scheme const int block_size = 16; int n_ceil = (n % block_size == 0) ? n / block_size : (n / block_size) + 1; dim3 grid(n_ceil, n_ceil); dim3 block(block_size, block_size); hipLaunchKernelGGL(( initial_value), dim3(dim3(grid)), dim3(dim3(block)), 0, 0, n, dx, length, u); hipLaunchKernelGGL(( zero), dim3(dim3(grid)), dim3(dim3(block)), 0, 0, n, u_tmp); // Ensure everything is initalised on the device hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { std::cerr << "CUDA error after initalisation" << std::endl; exit(EXIT_FAILURE); } // // Run through timesteps under the explicit scheme // // Start the solve timer auto tic = std::chrono::high_resolution_clock::now(); for (int t = 0; t < nsteps; ++t) { // Call the solve kernel // Computes u_tmp at the next timestep // given the value of u at the current timestep if (t % 2 == 0) hipLaunchKernelGGL(( solve), dim3(dim3(grid)), dim3(dim3(block)), 0, 0, n, alpha, dx, dt, u, u_tmp); else hipLaunchKernelGGL(( solve), dim3(dim3(grid)), dim3(dim3(block)), 0, 0, n, alpha, dx, dt, u_tmp, u); } // Stop solve timer hipDeviceSynchronize(); auto toc = std::chrono::high_resolution_clock::now(); // Get access to u on the host double *u_host = new double[n * n]; err = hipMemcpy(u_host, u, sizeof(double) * n * n, hipMemcpyDeviceToHost); if (err != hipSuccess) { std::cerr << "CUDA error on copying back data" << std::endl; exit(EXIT_FAILURE); } // // Check the L2-norm of the computed solution // against the *known* solution from the MMS scheme // double norm = l2norm(n, u_host, nsteps, dt, alpha, dx, length); // Stop total timer auto stop = std::chrono::high_resolution_clock::now(); // Print results std::cout << "Results" << std::endl << std::endl << "Error (L2norm): " << norm << std::endl << "Solve time (s): " << std::chrono::duration_cast<std::chrono::duration<double>>(toc - tic) .count() << std::endl << "Total time (s): " << std::chrono::duration_cast<std::chrono::duration<double>>(stop - start) .count() << std::endl << "Bandwidth (GB/s): " << 1.0E-9 * 2.0 * n * n * nsteps * sizeof(double) / std::chrono::duration_cast<std::chrono::duration<double>>(toc - tic) .count() << std::endl << LINE << std::endl; delete[] u_host; } // Sets the mesh to an initial value, determined by the MMS scheme __global__ void initial_value(const unsigned int n, const double dx, const double length, double *u) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n || j >= n) return; int idx = i + j * n; double y = dx * (j + 1); // Physical y position double x = dx * (i + 1); // Physical x position u[idx] = sin(PI * x / length) * sin(PI * y / length); } // Zero the array u __global__ void zero(const unsigned int n, double *u) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= n * n) return; u[i] = 0.0; } // Compute the next timestep, given the current timestep // Loop over the nxn grid __global__ void solve(const unsigned int n, const double alpha, const double dx, const double dt, double *__restrict__ u, double *__restrict__ u_tmp) { // Finite difference constant multiplier const double r = alpha * dt / (dx * 
dx); const double r2 = 1.0 - 4.0 * r; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n || j >= n) return; // Update the 5-point stencil, using boundary conditions on the edges of the // domain. Boundaries are zero because the MMS solution is zero there. u_tmp[i + j * n] = r2 * u[i + j * n] + r * ((i < n - 1) ? u[i + 1 + j * n] : 0.0) + r * ((i > 0) ? u[i - 1 + j * n] : 0.0) + r * ((j < n - 1) ? u[i + (j + 1) * n] : 0.0) + r * ((j > 0) ? u[i + (j - 1) * n] : 0.0); } // True answer given by the manufactured solution double solution(const double t, const double x, const double y, const double alpha, const double length) { return exp(-2.0 * alpha * PI * PI * t / (length * length)) * sin(PI * x / length) * sin(PI * y / length); } // Computes the L2-norm of the computed grid and the MMS known solution // The known solution is the same as the boundary function. double l2norm(const unsigned int n, const double *u, const int nsteps, const double dt, const double alpha, const double dx, const double length) { // Final (real) time simulated double time = dt * (double)nsteps; // L2-norm error double l2norm = 0.0; // Loop over the grid and compute difference of computed and known solutions // as an L2-norm double y = dx; for (int j = 0; j < n; ++j) { double x = dx; for (int i = 0; i < n; ++i) { double answer = solution(time, x, y, alpha, length); l2norm += (u[i + j * n] - answer) * (u[i + j * n] - answer); x += dx; } y += dx; } return sqrt(l2norm); }
bc0355b839595daf4ccdaea65bf96e6d0bdfcd49.cu
/* ** PROGRAM: heat equation solve ** ** PURPOSE: This program will explore use of an explicit ** finite difference method to solve the heat ** equation under a method of manufactured solution (MMS) ** scheme. The solution has been set to be a simple ** function based on exponentials and trig functions. ** ** A finite difference scheme is used on a 1000x1000 cube. ** A total of 0.5 units of time are simulated. ** ** The MMS solution has been adapted from ** G.W. Recktenwald (2011). Finite difference approximations ** to the Heat Equation. Portland State University. ** ** ** USAGE: Run with two arguments: ** First is the number of cells. ** Second is the number of timesteps. ** ** For example, with 100x100 cells and 10 steps: ** ** ./heat 100 10 ** ** ** HISTORY: ** Ported to SYCL by Tom Deakin, Nov 2019 ** Ported to OpenCL by Tom Deakin, Jan 2020 ** Ported to CUDA by Zheming Jin, June 2020 ** */ #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <cuda.h> // Key constants used in this program #define PI acos(-1.0) // Pi #define LINE "--------------------" // A line for fancy output // Function definitions __global__ void initial_value(const unsigned int n, const double dx, const double length, double *u); __global__ void zero(const unsigned int n, double *u); __global__ void solve(const unsigned int n, const double alpha, const double dx, const double dt, double *__restrict__ u, double *__restrict__ u_tmp); double solution(const double t, const double x, const double y, const double alpha, const double length); double l2norm(const unsigned int n, const double *u, const int nsteps, const double dt, const double alpha, const double dx, const double length); // Main function int main(int argc, char *argv[]) { // Start the total program runtime timer auto start = std::chrono::high_resolution_clock::now(); // Problem size, forms an nxn grid int n = 1000; // Number of timesteps int nsteps = 10; // Check for the correct number of arguments // Print usage and exits if not correct if (argc == 3) { // Set problem size from first argument n = atoi(argv[1]); if (n < 0) { std::cerr << "Error: n must be positive" << std::endl; exit(EXIT_FAILURE); } // Set number of timesteps from second argument nsteps = atoi(argv[2]); if (nsteps < 0) { std::cerr << "Error: nsteps must be positive" << std::endl; exit(EXIT_FAILURE); } } // // Set problem definition // double alpha = 0.1; // heat equation coefficient double length = 1000.0; // physical size of domain: length x length square double dx = length / (n + 1); // physical size of each cell (+1 as don't // simulate boundaries as they are given) double dt = 0.5 / nsteps; // time interval (total time of 0.5s) // Stability requires that dt/(dx^2) <= 0.5, double r = alpha * dt / (dx * dx); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); char *device_name = prop.name; // Print message detailing runtime configuration std::cout << std::endl << " MMS heat equation" << std::endl << std::endl << LINE << std::endl << "Problem input" << std::endl << std::endl << " Grid size: " << n << " x " << n << std::endl << " Cell width: " << dx << std::endl << " Grid length: " << length << "x" << length << std::endl << std::endl << " Alpha: " << alpha << std::endl << std::endl << " Steps: " << nsteps << std::endl << " Total time: " << dt * (double)nsteps << std::endl << " Time step: " << dt << std::endl << " GPU device: " << device_name << std::endl << LINE << std::endl; // Stability check std::cout << "Stability" << std::endl << std::endl; 
std::cout << " r value: " << r << std::endl; if (r > 0.5) std::cout << " Warning: unstable" << std::endl; std::cout << LINE << std::endl; // Allocate two nxn grids double *u; double *u_tmp; cudaMalloc((void **)&u, sizeof(double) * n * n); cudaMalloc((void **)&u_tmp, sizeof(double) * n * n); // Set the initial value of the grid under the MMS scheme const int block_size = 16; int n_ceil = (n % block_size == 0) ? n / block_size : (n / block_size) + 1; dim3 grid(n_ceil, n_ceil); dim3 block(block_size, block_size); initial_value<<<dim3(grid), dim3(block)>>>(n, dx, length, u); zero<<<dim3(grid), dim3(block)>>>(n, u_tmp); // Ensure everything is initalised on the device cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { std::cerr << "CUDA error after initalisation" << std::endl; exit(EXIT_FAILURE); } // // Run through timesteps under the explicit scheme // // Start the solve timer auto tic = std::chrono::high_resolution_clock::now(); for (int t = 0; t < nsteps; ++t) { // Call the solve kernel // Computes u_tmp at the next timestep // given the value of u at the current timestep if (t % 2 == 0) solve<<< dim3(grid), dim3(block)>>>(n, alpha, dx, dt, u, u_tmp); else solve<<< dim3(grid), dim3(block)>>>(n, alpha, dx, dt, u_tmp, u); } // Stop solve timer cudaDeviceSynchronize(); auto toc = std::chrono::high_resolution_clock::now(); // Get access to u on the host double *u_host = new double[n * n]; err = cudaMemcpy(u_host, u, sizeof(double) * n * n, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { std::cerr << "CUDA error on copying back data" << std::endl; exit(EXIT_FAILURE); } // // Check the L2-norm of the computed solution // against the *known* solution from the MMS scheme // double norm = l2norm(n, u_host, nsteps, dt, alpha, dx, length); // Stop total timer auto stop = std::chrono::high_resolution_clock::now(); // Print results std::cout << "Results" << std::endl << std::endl << "Error (L2norm): " << norm << std::endl << "Solve time (s): " << std::chrono::duration_cast<std::chrono::duration<double>>(toc - tic) .count() << std::endl << "Total time (s): " << std::chrono::duration_cast<std::chrono::duration<double>>(stop - start) .count() << std::endl << "Bandwidth (GB/s): " << 1.0E-9 * 2.0 * n * n * nsteps * sizeof(double) / std::chrono::duration_cast<std::chrono::duration<double>>(toc - tic) .count() << std::endl << LINE << std::endl; delete[] u_host; } // Sets the mesh to an initial value, determined by the MMS scheme __global__ void initial_value(const unsigned int n, const double dx, const double length, double *u) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n || j >= n) return; int idx = i + j * n; double y = dx * (j + 1); // Physical y position double x = dx * (i + 1); // Physical x position u[idx] = sin(PI * x / length) * sin(PI * y / length); } // Zero the array u __global__ void zero(const unsigned int n, double *u) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= n * n) return; u[i] = 0.0; } // Compute the next timestep, given the current timestep // Loop over the nxn grid __global__ void solve(const unsigned int n, const double alpha, const double dx, const double dt, double *__restrict__ u, double *__restrict__ u_tmp) { // Finite difference constant multiplier const double r = alpha * dt / (dx * dx); const double r2 = 1.0 - 4.0 * r; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n || j >= n) return; // Update the 5-point stencil, using boundary 
conditions on the edges of the // domain. Boundaries are zero because the MMS solution is zero there. u_tmp[i + j * n] = r2 * u[i + j * n] + r * ((i < n - 1) ? u[i + 1 + j * n] : 0.0) + r * ((i > 0) ? u[i - 1 + j * n] : 0.0) + r * ((j < n - 1) ? u[i + (j + 1) * n] : 0.0) + r * ((j > 0) ? u[i + (j - 1) * n] : 0.0); } // True answer given by the manufactured solution double solution(const double t, const double x, const double y, const double alpha, const double length) { return exp(-2.0 * alpha * PI * PI * t / (length * length)) * sin(PI * x / length) * sin(PI * y / length); } // Computes the L2-norm of the computed grid and the MMS known solution // The known solution is the same as the boundary function. double l2norm(const unsigned int n, const double *u, const int nsteps, const double dt, const double alpha, const double dx, const double length) { // Final (real) time simulated double time = dt * (double)nsteps; // L2-norm error double l2norm = 0.0; // Loop over the grid and compute difference of computed and known solutions // as an L2-norm double y = dx; for (int j = 0; j < n; ++j) { double x = dx; for (int i = 0; i < n; ++i) { double answer = solution(time, x, y, alpha, length); l2norm += (u[i + j * n] - answer) * (u[i + j * n] - answer); x += dx; } y += dx; } return sqrt(l2norm); }
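The header above stresses the stability requirement on dt/(dx*dx), and main() warns when r = alpha*dt/(dx*dx) exceeds 0.5. As a quick worked check, the default configuration (n = 1000, nsteps = 10) is comfortably stable; the snippet below only repeats the arithmetic already in main() and is not part of the program.

// Worked stability check for the defaults: r comes out around 0.005, far below 0.5.
#include <cstdio>

int main() {
    const double alpha = 0.1, length = 1000.0;
    const int n = 1000, nsteps = 10;
    const double dx = length / (n + 1);        // ~0.999
    const double dt = 0.5 / nsteps;            // 0.05
    const double r  = alpha * dt / (dx * dx);  // ~0.0050
    std::printf("r = %f (stable: %s)\n", r, r <= 0.5 ? "yes" : "no");
    return 0;
}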
77535364eca3e9d51739b864be8d9b18394ee29d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {

__global__ void writeTest(int* array){
    for(int i = 0; i < 8; i++){
        array[i] *= 2;
    }
}

}
77535364eca3e9d51739b864be8d9b18394ee29d.cu
extern "C" { __global__ void writeTest(int* array){ for(int i = 0; i < 8; i++){ array[i] *= 2; } } }
a06a997846663c7f5b95161c6a86450eb1d973ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "multiply.cuh" __global__ void multiplyKernel(float* output, float* input, const int width, const int height, const float factor) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= width || j >= height) { return; } output[(j * width) + i] = input[(j * width) + i] * factor; } __global__ void MultiplyKernel3D(cnoise::Point* left, const cnoise::Point* right, const int width, const int height, const float factor) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if(i >= width || j >= height) { return; } // might as well grab temporary local values, since we're writing back to one of the inputs. float left_val = left[i + (j * width)].Value; float right_val = left[i + (j * width)].Value; // Left is specified as the "Points" ptr in the parent module that launched this left[i + (j * width)].Value = left_val * right_val; } void multiplyLauncher(float* output, float* input, const int width, const int height, float factor) { #ifdef CUDA_KERNEL_TIMING hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); #endif // CUDA_KERNEL_TIMING // Setup dimensions of kernel launch using occupancy calculator. int blockSize, minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, multiplyKernel, 0, 0); //??? dim3 block(blockSize, blockSize, 1); dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1); hipLaunchKernelGGL(( multiplyKernel), dim3(grid), dim3(block), 0, 0, output, input, width, height, factor); // Check for succesfull kernel launch hipError_t err = hipGetLastError(); cudaAssert(err); // Synchronize device err = hipDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING hipEventRecord(stop); hipEventSynchronize(stop); float elapsed = 0.0f; hipEventElapsedTime(&elapsed, start, stop); printf("Multiply Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING // If this completes, kernel is done and "output" contains correct data. } void MultiplyLauncher3D(cnoise::Point* left, const cnoise::Point* right, const int width, const int height) { #ifdef CUDA_KERNEL_TIMING hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); #endif // CUDA_KERNEL_TIMING // Setup dimensions of kernel launch using occupancy calculator. int blockSize, minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, multiplyKernel, 0, 0); //??? dim3 block(blockSize, blockSize, 1); dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1); hipLaunchKernelGGL(( multiplyKernel), dim3(grid), dim3(block), 0, 0, output, input, width, height, factor); // Check for succesfull kernel launch hipError_t err = hipGetLastError(); cudaAssert(err); // Synchronize device err = hipDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING hipEventRecord(stop); hipEventSynchronize(stop); float elapsed = 0.0f; hipEventElapsedTime(&elapsed, start, stop); printf("Multiply Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING }
a06a997846663c7f5b95161c6a86450eb1d973ae.cu
#include "multiply.cuh" __global__ void multiplyKernel(float* output, float* input, const int width, const int height, const float factor) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= width || j >= height) { return; } output[(j * width) + i] = input[(j * width) + i] * factor; } __global__ void MultiplyKernel3D(cnoise::Point* left, const cnoise::Point* right, const int width, const int height, const float factor) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if(i >= width || j >= height) { return; } // might as well grab temporary local values, since we're writing back to one of the inputs. float left_val = left[i + (j * width)].Value; float right_val = left[i + (j * width)].Value; // Left is specified as the "Points" ptr in the parent module that launched this left[i + (j * width)].Value = left_val * right_val; } void multiplyLauncher(float* output, float* input, const int width, const int height, float factor) { #ifdef CUDA_KERNEL_TIMING cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // CUDA_KERNEL_TIMING // Setup dimensions of kernel launch using occupancy calculator. int blockSize, minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, multiplyKernel, 0, 0); //??? dim3 block(blockSize, blockSize, 1); dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1); multiplyKernel<<<grid, block>>>(output, input, width, height, factor); // Check for succesfull kernel launch cudaError_t err = cudaGetLastError(); cudaAssert(err); // Synchronize device err = cudaDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed = 0.0f; cudaEventElapsedTime(&elapsed, start, stop); printf("Multiply Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING // If this completes, kernel is done and "output" contains correct data. } void MultiplyLauncher3D(cnoise::Point* left, const cnoise::Point* right, const int width, const int height) { #ifdef CUDA_KERNEL_TIMING cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); #endif // CUDA_KERNEL_TIMING // Setup dimensions of kernel launch using occupancy calculator. int blockSize, minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, multiplyKernel, 0, 0); //??? dim3 block(blockSize, blockSize, 1); dim3 grid((width - 1) / blockSize + 1, (height - 1) / blockSize + 1, 1); multiplyKernel<<<grid, block>>>(output, input, width, height, factor); // Check for succesfull kernel launch cudaError_t err = cudaGetLastError(); cudaAssert(err); // Synchronize device err = cudaDeviceSynchronize(); cudaAssert(err); #ifdef CUDA_KERNEL_TIMING cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed = 0.0f; cudaEventElapsedTime(&elapsed, start, stop); printf("Multiply Kernel execution time in ms: %f\n", elapsed); #endif // CUDA_KERNEL_TIMING }
a9f672ee6cb98b9cd3163fa435f778fc20073e8a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "k1.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *g_dataA = NULL; hipMalloc(&g_dataA, XSIZE*YSIZE); float *g_dataB = NULL; hipMalloc(&g_dataB, XSIZE*YSIZE); int floatpitch = 2; int width = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( k1), dim3(gridBlock),dim3(threadBlock), 0, 0, g_dataA,g_dataB,floatpitch,width); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( k1), dim3(gridBlock),dim3(threadBlock), 0, 0, g_dataA,g_dataB,floatpitch,width); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( k1), dim3(gridBlock),dim3(threadBlock), 0, 0, g_dataA,g_dataB,floatpitch,width); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a9f672ee6cb98b9cd3163fa435f778fc20073e8a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "k1.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *g_dataA = NULL; cudaMalloc(&g_dataA, XSIZE*YSIZE); float *g_dataB = NULL; cudaMalloc(&g_dataB, XSIZE*YSIZE); int floatpitch = 2; int width = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); k1<<<gridBlock,threadBlock>>>(g_dataA,g_dataB,floatpitch,width); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { k1<<<gridBlock,threadBlock>>>(g_dataA,g_dataB,floatpitch,width); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { k1<<<gridBlock,threadBlock>>>(g_dataA,g_dataB,floatpitch,width); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
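In the sweep above, end = steady_clock::now() runs immediately after the 1000 asynchronous launches are enqueued, with no synchronization in between, so the host clock stops before the kernels are guaranteed to have finished and the printed microseconds conflate launch overhead with only part of the execution time. One way to time the device work itself is with CUDA events, the same cudaEvent_* calls used in the multiply launcher earlier in this collection; the sketch below is meant to replace the chrono-timed loop inside the block/matrix sweep and reuses that loop's gridBlock, threadBlock and data pointers.

// Hypothetical event-timed replacement for the chrono loop above.
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);

cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    k1<<<gridBlock, threadBlock>>>(g_dataA, g_dataB, floatpitch, width);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);                    // wait until all 1000 kernels are done

float ms = 0.0f;
cudaEventElapsedTime(&ms, ev_start, ev_stop);     // total milliseconds on the device
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);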
6c3302237a5abc79f17c4376697179cf240de28d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #define DEBUG 0 #if DEBUG == 1 #include <sys/time.h> #endif #include <cstdio> #include <cassert> template<typename T> struct addF { __device__ T operator() (const T& a, const T& b) { return a + b; } }; template<typename T> struct minF { __device__ T operator() (const T& a, const T& b) { return (a < b) ? a : b; } }; template<typename T> struct maxF { __device__ T operator() (const T& a, const T& b) { return (a < b) ? b : a; } }; template<typename T, typename F> __device__ T reduce_shared(T* sdata, int n) { int tid = threadIdx.x; for (int s = n/2; s>0; s /=2) { if (tid < s) { T a = sdata[tid]; T b = sdata[tid+s]; sdata[tid] = F()(a, b); } __syncthreads(); } T r; if (tid == 0) { r = sdata[0]; } else { r = 0; } return r; } template<typename T, typename F> __global__ void reduce_kernel(const T* const d_in, T* const d_out, T identity, int length) { int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; int tid = threadIdx.x; int n = blockDim.x; extern __shared__ __align__(sizeof(T)) unsigned char smem[]; T *sdata = reinterpret_cast<T *>(smem); // size should be n if (idx < length) { sdata[tid] = d_in[idx]; } else { sdata[tid] = identity; } if (idx + n < length) { sdata[tid] = F()(sdata[tid], d_in[idx + n]); } __syncthreads(); T r = reduce_shared<T, F>(sdata, n); if (tid == 0) { d_out[blockIdx.x] = r; } } __global__ void reduce_min_max_kernel(const float* const d_in, float* const d_min, float* const d_max, int length) { int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; int tid = threadIdx.x; int n = blockDim.x; extern __shared__ __align__(sizeof(float)) unsigned char smem[]; float *sdata = reinterpret_cast<float *>(smem); // size should be n float* sdata_min = sdata; float* sdata_max = sdata + n; if (idx < length) { float t = d_in[idx]; sdata_min[tid] = t; sdata_max[tid] = t; } else { sdata_min[tid] = 275.0; sdata_max[tid] = 0.0; } if (idx + n < length) { sdata_min[tid] = minF<int>()(sdata[tid], d_in[idx + n]); sdata_max[tid] = maxF<int>()(sdata[tid], d_in[idx + n]); } __syncthreads(); for (int s = n/2; s>0; s /=2) { if (tid < s) { float a = sdata_min[tid]; float b = sdata_min[tid+s]; sdata_min[tid] = min(a, b); a = sdata_max[tid]; b = sdata_max[tid+s]; sdata_max[tid] = max(a, b); } __syncthreads(); } if (tid == 0) { d_min[blockIdx.x] = sdata_min[0]; d_max[blockIdx.x] = sdata_max[0]; } } __global__ void histogram_local_reduce_atomic(const float* const d_logLuminance, unsigned int* const d_histogram, const float min_logLum, const float max_logLum, const size_t length, const size_t numBins) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; //int n = blockDim.x; float range_logLum = max_logLum - min_logLum; extern __shared__ __align__(sizeof(unsigned int)) unsigned char smem[]; unsigned int *sdata = reinterpret_cast<unsigned int *>(smem); // size should be numBins for(int i=tid; i<numBins; i += blockDim.x) { sdata[i] = 0; } __syncthreads(); for (int i=idx; i<length; i += gridDim.x * blockDim.x) { int bin = minF<unsigned int>()(static_cast<unsigned int>((d_logLuminance[i] - min_logLum) / range_logLum * numBins), static_cast<unsigned int>(numBins-1)); //d_buf[bin * length + idx] += 1; atomicAdd(&sdata[bin], 1); } __syncthreads(); for(int i=tid; i<numBins; i += blockDim.x) { atomicAdd(&d_histogram[i], sdata[i]); } //for (int i=0; i<numBins; ++i) { // __syncthreads(); // sdata[tid] = (idx < length)? 
d_buf[i * length + idx] : 0; // __syncthreads(); // unsigned int r = reduce_shared< unsigned int, addF<unsigned int> >(sdata, n); // if (tid == 0) atomicAdd(&d_histogram[i], r); //} } template<typename T, typename F> __global__ void hills_steele_scan_kernel(const T* const d_in, T* const d_out, int length) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; int n = blockDim.x; // size of one shared mem array extern __shared__ __align__(sizeof(T)) unsigned char smem[]; T *sdata = reinterpret_cast<T *>(smem); // size should be n if (idx < length) { sdata[tid] = d_in[idx]; } __syncthreads(); for (int s = 1, pin = 0, pout = 1; s < blockDim.x; s <<= 1, pin = 1-pin, pout = 1-pout) { if (tid >= s) { T a = sdata[pin*n + tid - s]; T b = sdata[pin*n + tid]; sdata[pout*n + tid] = F()(a, b); } else { sdata[pout*n + tid] = sdata[pin*n + tid]; } __syncthreads(); } if (idx < length) { d_out[idx] = sdata[tid]; } } template<typename T, typename F> __global__ void blelloch_scan_kernel(T* const d_in, T* const d_out, int length, T identity) { int gtid = blockIdx.x * blockDim.x + threadIdx.x; int ltid = threadIdx.x; int n = 2*blockDim.x; extern __shared__ __align__(sizeof(T)) unsigned char smem[]; T *sdata = reinterpret_cast<T *>(smem); // size should be n sdata[ltid * 2] = (gtid * 2 < length)? d_in[gtid * 2] : identity; sdata[ltid * 2 + 1] = (gtid * 2 + 1< length)? d_in[gtid * 2 + 1] : identity; __syncthreads(); int s = 2; for (; s <= n; s <<= 1) { int pos = ltid * s + s - 1; if (pos < n) { T a = sdata[pos - s/2]; T b = sdata[pos]; sdata[pos] = F()(a, b); } __syncthreads(); } if (ltid == 0) { if(d_out) d_out[blockIdx.x] = sdata[n-1]; sdata[n - 1] = identity; // identity item } __syncthreads(); s = n; for (; s > 1; s >>= 1) { int pos = ltid * s + s - 1; if (pos < n) { T a = sdata[pos - s/2]; T b = sdata[pos]; sdata[pos] = F()(a, b); sdata[pos - s/2] = b; } __syncthreads(); } if (gtid * 2 < length) { d_in[gtid * 2] = sdata[ltid * 2]; } if (gtid * 2 + 1 < length) { d_in[gtid * 2 + 1] = sdata[ltid * 2 + 1]; } } template<typename T, typename F> __global__ void scan_update_kernel(T* const d_in, T* const d_buf, int length) { int bid = blockIdx.x; int tid = threadIdx.x; int idx = bid * blockDim.x + tid; __shared__ T sdata; if (tid == 0) { sdata = d_buf[bid]; } __syncthreads(); if (idx < length) { d_in[idx] = F()(d_in[idx], sdata); } } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ int length = numRows * numCols; int blockSize = 512; int gridSize = (length + blockSize - 1)/blockSize; float* d_buf1; unsigned int* d_buf2; #if DEBUG == 1 struct timeval tb, te; gettimeofday(&tb, 0); #endif checkCudaErrors(hipMalloc((void **)&d_buf1, length * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_buf2, numBins * sizeof(unsigned int))); checkCudaErrors(hipMemset(d_buf2, 0, numBins * sizeof(unsigned int))); #if DEBUG == 1 
hipDeviceSynchronize(); gettimeofday(&te, 0); printf("memset time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec); #endif // find min_logLum #if DEBUG == 1 printf("reduce_min: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length); gettimeofday(&tb, 0); #endif hipLaunchKernelGGL(( reduce_kernel< float, minF<float> >), dim3(gridSize), dim3(blockSize), blockSize * sizeof(float), 0, d_logLuminance, d_buf1, 275.0, length); checkCudaErrors(hipGetLastError()); int gs = gridSize; while(gs > 1) { int l = gs; gs = (gs+blockSize-1)/blockSize; #if DEBUG == 1 printf("reduce_min: grid: %d, block: %d, length: %d\n", gs, blockSize, l); #endif hipLaunchKernelGGL(( reduce_kernel<float, minF<float> >), dim3(gs), dim3(blockSize), blockSize * sizeof(float), 0, d_buf1, d_buf1, 275.0, l); checkCudaErrors(hipGetLastError()); } #if DEBUG == 1 hipDeviceSynchronize(); gettimeofday(&te, 0); printf("reduce min time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec); #endif checkCudaErrors(hipMemcpy(&min_logLum, d_buf1, sizeof(float), hipMemcpyDeviceToHost)); printf("min_logLum = %f\n", min_logLum); // find max logLum #if DEBUG == 1 printf("reduce_max: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length); gettimeofday(&tb, 0); #endif hipLaunchKernelGGL(( reduce_kernel<float, maxF<float> >), dim3(gridSize), dim3(blockSize), blockSize * sizeof(float), 0, d_logLuminance, d_buf1, 0.0, length); checkCudaErrors(hipGetLastError()); gs = gridSize; while(gs > 1) { int l = gs; gs = (gs+blockSize-1)/blockSize; #if DEBUG == 1 printf("reduce_max: grid: %d, block: %d, length: %d\n", gs, blockSize, l); #endif hipLaunchKernelGGL(( reduce_kernel<float, maxF<float> >), dim3(gs), dim3(blockSize), blockSize * sizeof(float), 0, d_buf1, d_buf1, 0.0, l); checkCudaErrors(hipGetLastError()); } #if DEBUG == 1 hipDeviceSynchronize(); gettimeofday(&te, 0); printf("reduce max time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec); #endif checkCudaErrors(hipMemcpy(&max_logLum, d_buf1, sizeof(float), hipMemcpyDeviceToHost)); printf("max_logLum = %f\n", max_logLum); // compute histogram gridSize = (length + blockSize - 1)/blockSize; #if DEBUG == 1 printf("histogram: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length); gettimeofday(&tb, 0); #endif hipLaunchKernelGGL(( histogram_local_reduce_atomic), dim3(gridSize), dim3(blockSize), numBins * sizeof(unsigned int), 0, d_logLuminance, d_cdf, min_logLum, max_logLum, length, numBins); checkCudaErrors(hipGetLastError()); #if DEBUG == 1 hipDeviceSynchronize(); gettimeofday(&te, 0); printf("histogram time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec); #endif // exclusive scan to produce cdf blockSize = 1024; gridSize = (numBins + blockSize - 1)/blockSize; assert(gridSize <= blockSize); #if DEBUG == 1 printf("scan: grid: %d, block: %d, length: %lu\n", gridSize, blockSize, numBins); gettimeofday(&tb, 0); #endif hipLaunchKernelGGL(( blelloch_scan_kernel<unsigned int, addF<unsigned int> >), dim3(gridSize), dim3(blockSize), blockSize * 2 * sizeof(unsigned int), 0, d_cdf, d_buf2, numBins, 0); checkCudaErrors(hipGetLastError()); if (gridSize > 1) { #if DEBUG == 1 printf("scan: grid: %d, block: %d, length: %d\n", (gridSize+blockSize-1)/blockSize, blockSize, gridSize); #endif hipLaunchKernelGGL(( blelloch_scan_kernel<unsigned int, addF<unsigned int> >), dim3((gridSize+blockSize-1)/blockSize), dim3(blockSize), blockSize * 2 * sizeof(unsigned int), 0, d_buf2, NULL, gridSize, 0); checkCudaErrors(hipGetLastError()); #if DEBUG 
== 1 printf("scan_update: grid: %d, block: %d\n", gridSize, blockSize); #endif hipLaunchKernelGGL(( scan_update_kernel<unsigned int, addF<unsigned int> >), dim3(gridSize), dim3(blockSize), 0, 0, d_cdf, d_buf2, numBins); checkCudaErrors(hipGetLastError()); } #if DEBUG == 1 hipDeviceSynchronize(); gettimeofday(&te, 0); printf("scan time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec); #endif checkCudaErrors(hipFree(d_buf1)); checkCudaErrors(hipFree(d_buf2)); }
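A small serial reference for the four steps the kernels above implement, reproducing the worked example from the file's header comment. One detail worth noting: the header's example cdf ([4 11 14]) is the inclusive scan of the histogram, whereas the Blelloch kernel above writes the exclusive scan ([0 4 11] for that histogram) into d_cdf. The snippet is illustrative host-only code, not part of the assignment pipeline.

// Serial reference: min/max, range, histogram, exclusive prefix sum.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> lum = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
    const int numBins = 3;

    // 1-2) min / max / range
    float lo = *std::min_element(lum.begin(), lum.end());    // 0
    float hi = *std::max_element(lum.begin(), lum.end());    // 9
    float range = hi - lo;                                    // 9

    // 3) histogram: bin = (v - lo) / range * numBins, clamped to numBins - 1
    std::vector<unsigned int> histo(numBins, 0);
    for (float v : lum) {
        unsigned int bin = static_cast<unsigned int>((v - lo) / range * numBins);
        if (bin >= static_cast<unsigned int>(numBins)) bin = numBins - 1;
        ++histo[bin];
    }                                                         // histo = [4 7 3]

    // 4) exclusive prefix sum (what the Blelloch scan leaves in d_cdf)
    std::vector<unsigned int> cdf(numBins, 0);
    for (int i = 1; i < numBins; ++i) cdf[i] = cdf[i - 1] + histo[i - 1];   // cdf = [0 4 11]

    std::printf("min %g max %g  histo [%u %u %u]  cdf [%u %u %u]\n",
                lo, hi, histo[0], histo[1], histo[2], cdf[0], cdf[1], cdf[2]);
    return 0;
}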
6c3302237a5abc79f17c4376697179cf240de28d.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. */ #include "utils.h" #define DEBUG 0 #if DEBUG == 1 #include <sys/time.h> #endif #include <cstdio> #include <cassert> template<typename T> struct addF { __device__ T operator() (const T& a, const T& b) { return a + b; } }; template<typename T> struct minF { __device__ T operator() (const T& a, const T& b) { return (a < b) ? 
// Tree reduction over a block's shared-memory buffer of n elements;
// only thread 0 returns the reduced value.
template<typename T, typename F>
__device__ T reduce_shared(T* sdata, int n) {
    int tid = threadIdx.x;
    for (int s = n/2; s > 0; s /= 2) {
        if (tid < s) {
            T a = sdata[tid];
            T b = sdata[tid+s];
            sdata[tid] = F()(a, b);
        }
        __syncthreads();
    }
    T r;
    if (tid == 0) {
        r = sdata[0];
    } else {
        r = 0;
    }
    return r;
}

// Generic block-wise reduction: each block reduces 2*blockDim.x input elements
// and writes one partial result per block to d_out.
template<typename T, typename F>
__global__ void reduce_kernel(const T* const d_in, T* const d_out, T identity, int length) {
    int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int tid = threadIdx.x;
    int n = blockDim.x;

    extern __shared__ __align__(sizeof(T)) unsigned char smem[];
    T *sdata = reinterpret_cast<T *>(smem); // size should be n

    if (idx < length) {
        sdata[tid] = d_in[idx];
    } else {
        sdata[tid] = identity;
    }
    if (idx + n < length) {
        sdata[tid] = F()(sdata[tid], d_in[idx + n]);
    }
    __syncthreads();

    T r = reduce_shared<T, F>(sdata, n);
    if (tid == 0) {
        d_out[blockIdx.x] = r;
    }
}

// Fused min/max reduction (currently unused below); the launch must provide
// 2 * blockDim.x * sizeof(float) bytes of dynamic shared memory.
__global__ void reduce_min_max_kernel(const float* const d_in, float* const d_min, float* const d_max, int length) {
    int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    int tid = threadIdx.x;
    int n = blockDim.x;

    extern __shared__ __align__(sizeof(float)) unsigned char smem[];
    float *sdata = reinterpret_cast<float *>(smem); // size should be 2*n
    float* sdata_min = sdata;
    float* sdata_max = sdata + n;

    if (idx < length) {
        float t = d_in[idx];
        sdata_min[tid] = t;
        sdata_max[tid] = t;
    } else {
        sdata_min[tid] = 275.0;
        sdata_max[tid] = 0.0;
    }
    if (idx + n < length) {
        sdata_min[tid] = minF<float>()(sdata_min[tid], d_in[idx + n]);
        sdata_max[tid] = maxF<float>()(sdata_max[tid], d_in[idx + n]);
    }
    __syncthreads();

    for (int s = n/2; s > 0; s /= 2) {
        if (tid < s) {
            float a = sdata_min[tid];
            float b = sdata_min[tid+s];
            sdata_min[tid] = min(a, b);
            a = sdata_max[tid];
            b = sdata_max[tid+s];
            sdata_max[tid] = max(a, b);
        }
        __syncthreads();
    }

    if (tid == 0) {
        d_min[blockIdx.x] = sdata_min[0];
        d_max[blockIdx.x] = sdata_max[0];
    }
}

// Histogram with per-block privatized bins in shared memory: each block
// accumulates into its local copy and then merges it into the global histogram
// with one atomicAdd per bin.
__global__ void histogram_local_reduce_atomic(const float* const d_logLuminance,
                                              unsigned int* const d_histogram,
                                              const float min_logLum,
                                              const float max_logLum,
                                              const size_t length,
                                              const size_t numBins) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    //int n = blockDim.x;
    float range_logLum = max_logLum - min_logLum;

    extern __shared__ __align__(sizeof(unsigned int)) unsigned char smem[];
    unsigned int *sdata = reinterpret_cast<unsigned int *>(smem); // size should be numBins

    for (int i = tid; i < numBins; i += blockDim.x) {
        sdata[i] = 0;
    }
    __syncthreads();

    for (int i = idx; i < length; i += gridDim.x * blockDim.x) {
        int bin = minF<unsigned int>()(static_cast<unsigned int>((d_logLuminance[i] - min_logLum) / range_logLum * numBins),
                                       static_cast<unsigned int>(numBins-1));
        //d_buf[bin * length + idx] += 1;
        atomicAdd(&sdata[bin], 1);
    }
    __syncthreads();

    for (int i = tid; i < numBins; i += blockDim.x) {
        atomicAdd(&d_histogram[i], sdata[i]);
    }

    //for (int i=0; i<numBins; ++i) {
    //    __syncthreads();
    //    sdata[tid] = (idx < length)? d_buf[i * length + idx] : 0;
    //    __syncthreads();
    //    unsigned int r = reduce_shared< unsigned int, addF<unsigned int> >(sdata, n);
    //    if (tid == 0) atomicAdd(&d_histogram[i], r);
    //}
}
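/* Worked example of the binning formula used above, with the numbers from the
   header comment (min = 0, max = 9, range = 9, numBins = 3):
     lum = 7  ->  (7 - 0) / 9 * 3 = 2.33...  ->  bin 2
     lum = 9  ->  (9 - 0) / 9 * 3 = 3        ->  clamped to numBins - 1 = 2
   The clamp is needed because the maximum value maps exactly onto numBins. */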
// Hillis-Steele inclusive scan over one block (currently unused below); the
// double-buffered indexing needs 2 * blockDim.x elements of dynamic shared memory.
template<typename T, typename F>
__global__ void hills_steele_scan_kernel(const T* const d_in, T* const d_out, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int n = blockDim.x; // size of one shared mem array

    extern __shared__ __align__(sizeof(T)) unsigned char smem[];
    T *sdata = reinterpret_cast<T *>(smem); // size should be 2*n

    if (idx < length) {
        sdata[tid] = d_in[idx];
    }
    __syncthreads();

    for (int s = 1, pin = 0, pout = 1; s < blockDim.x; s <<= 1, pin = 1-pin, pout = 1-pout) {
        if (tid >= s) {
            T a = sdata[pin*n + tid - s];
            T b = sdata[pin*n + tid];
            sdata[pout*n + tid] = F()(a, b);
        } else {
            sdata[pout*n + tid] = sdata[pin*n + tid];
        }
        __syncthreads();
    }

    if (idx < length) {
        d_out[idx] = sdata[tid];
    }
}

// Blelloch exclusive scan over one block of n = 2*blockDim.x elements, performed
// in place in d_in; the per-block total is written to d_out (if non-NULL) so a
// second pass can scan the block totals.
template<typename T, typename F>
__global__ void blelloch_scan_kernel(T* const d_in, T* const d_out, int length, T identity) {
    int gtid = blockIdx.x * blockDim.x + threadIdx.x;
    int ltid = threadIdx.x;
    int n = 2*blockDim.x;

    extern __shared__ __align__(sizeof(T)) unsigned char smem[];
    T *sdata = reinterpret_cast<T *>(smem); // size should be n

    sdata[ltid * 2]     = (gtid * 2 < length)     ? d_in[gtid * 2]     : identity;
    sdata[ltid * 2 + 1] = (gtid * 2 + 1 < length) ? d_in[gtid * 2 + 1] : identity;
    __syncthreads();

    // up-sweep (reduce) phase
    int s = 2;
    for (; s <= n; s <<= 1) {
        int pos = ltid * s + s - 1;
        if (pos < n) {
            T a = sdata[pos - s/2];
            T b = sdata[pos];
            sdata[pos] = F()(a, b);
        }
        __syncthreads();
    }

    if (ltid == 0) {
        if (d_out) d_out[blockIdx.x] = sdata[n-1];
        sdata[n - 1] = identity; // identity item
    }
    __syncthreads();

    // down-sweep phase
    s = n;
    for (; s > 1; s >>= 1) {
        int pos = ltid * s + s - 1;
        if (pos < n) {
            T a = sdata[pos - s/2];
            T b = sdata[pos];
            sdata[pos] = F()(a, b);
            sdata[pos - s/2] = b;
        }
        __syncthreads();
    }

    if (gtid * 2 < length) {
        d_in[gtid * 2] = sdata[ltid * 2];
    }
    if (gtid * 2 + 1 < length) {
        d_in[gtid * 2 + 1] = sdata[ltid * 2 + 1];
    }
}

// Adds the scanned block total d_buf[blockIdx.x] to every element of the block's
// slice of d_in, turning the per-block scans into one global scan.
template<typename T, typename F>
__global__ void scan_update_kernel(T* const d_in, T* const d_buf, int length) {
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    int idx = bid * blockDim.x + tid;

    __shared__ T sdata;
    if (tid == 0) {
        sdata = d_buf[bid];
    }
    __syncthreads();

    if (idx < length) {
        d_in[idx] = F()(d_in[idx], sdata);
    }
}
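/* Illustrative host-side reference for step 4 (the exclusive prefix sum).  This
   sketch is not part of the original assignment and is never called; the name
   referenceExclusiveScan is made up for this example.  For the histogram
   [4 7 3] from the header comment it produces [0 4 11].

   The Blelloch kernel above computes the same result in place.  With a block of
   two threads (n = 4), [4 7 3] is padded with the identity 0 to [4 7 3 0]; the
   up-sweep produces [4 11 3 14] (14 is the block total written to d_out), the
   last element is cleared back to the identity, and the down-sweep produces
   [0 4 11 14], of which the first three entries are written back as the cdf. */
static void referenceExclusiveScan(const unsigned int* histo, unsigned int* cdf, size_t numBins) {
    unsigned int running = 0;
    for (size_t i = 0; i < numBins; ++i) {
        cdf[i] = running;     // exclusive: the sum of everything before histo[i]
        running += histo[i];
    }
}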
void your_histogram_and_prefixsum(const float* const d_logLuminance,
                                  unsigned int* const d_cdf,
                                  float &min_logLum,
                                  float &max_logLum,
                                  const size_t numRows,
                                  const size_t numCols,
                                  const size_t numBins)
{
    //TODO
    /*Here are the steps you need to implement
      1) find the minimum and maximum value in the input logLuminance channel
         store in min_logLum and max_logLum
      2) subtract them to find the range
      3) generate a histogram of all the values in the logLuminance channel using
         the formula: bin = (lum[i] - lumMin) / lumRange * numBins
      4) Perform an exclusive scan (prefix sum) on the histogram to get
         the cumulative distribution of luminance values (this should go in the
         incoming d_cdf pointer which already has been allocated for you)       */

    int length = numRows * numCols;
    int blockSize = 512;
    int gridSize = (length + blockSize - 1)/blockSize;

    float* d_buf1;
    unsigned int* d_buf2;

#if DEBUG == 1
    struct timeval tb, te;
    gettimeofday(&tb, 0);
#endif
    checkCudaErrors(cudaMalloc((void **)&d_buf1, length * sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_buf2, numBins * sizeof(unsigned int)));
    checkCudaErrors(cudaMemset(d_buf2, 0, numBins * sizeof(unsigned int)));
#if DEBUG == 1
    cudaDeviceSynchronize();
    gettimeofday(&te, 0);
    printf("memset time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec);
#endif

    // find min_logLum
#if DEBUG == 1
    printf("reduce_min: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length);
    gettimeofday(&tb, 0);
#endif
    reduce_kernel< float, minF<float> ><<<gridSize, blockSize, blockSize * sizeof(float)>>>(d_logLuminance, d_buf1, 275.0, length);
    checkCudaErrors(cudaGetLastError());
    int gs = gridSize;
    while (gs > 1) {
        int l = gs;
        gs = (gs+blockSize-1)/blockSize;
#if DEBUG == 1
        printf("reduce_min: grid: %d, block: %d, length: %d\n", gs, blockSize, l);
#endif
        reduce_kernel<float, minF<float> ><<<gs, blockSize, blockSize * sizeof(float)>>>(d_buf1, d_buf1, 275.0, l);
        checkCudaErrors(cudaGetLastError());
    }
#if DEBUG == 1
    cudaDeviceSynchronize();
    gettimeofday(&te, 0);
    printf("reduce min time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec);
#endif
    checkCudaErrors(cudaMemcpy(&min_logLum, d_buf1, sizeof(float), cudaMemcpyDeviceToHost));
    printf("min_logLum = %f\n", min_logLum);

    // find max logLum
#if DEBUG == 1
    printf("reduce_max: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length);
    gettimeofday(&tb, 0);
#endif
    reduce_kernel<float, maxF<float> ><<<gridSize, blockSize, blockSize * sizeof(float)>>>(d_logLuminance, d_buf1, 0.0, length);
    checkCudaErrors(cudaGetLastError());
    gs = gridSize;
    while (gs > 1) {
        int l = gs;
        gs = (gs+blockSize-1)/blockSize;
#if DEBUG == 1
        printf("reduce_max: grid: %d, block: %d, length: %d\n", gs, blockSize, l);
#endif
        reduce_kernel<float, maxF<float> ><<<gs, blockSize, blockSize * sizeof(float)>>>(d_buf1, d_buf1, 0.0, l);
        checkCudaErrors(cudaGetLastError());
    }
#if DEBUG == 1
    cudaDeviceSynchronize();
    gettimeofday(&te, 0);
    printf("reduce max time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec);
#endif
    checkCudaErrors(cudaMemcpy(&max_logLum, d_buf1, sizeof(float), cudaMemcpyDeviceToHost));
    printf("max_logLum = %f\n", max_logLum);

    // compute histogram
    gridSize = (length + blockSize - 1)/blockSize;
#if DEBUG == 1
    printf("histogram: grid: %d, block: %d, length: %d\n", gridSize, blockSize, length);
    gettimeofday(&tb, 0);
#endif
    histogram_local_reduce_atomic<<<gridSize, blockSize, numBins * sizeof(unsigned int)>>>(d_logLuminance, d_cdf, min_logLum, max_logLum, length, numBins);
    checkCudaErrors(cudaGetLastError());
#if DEBUG == 1
    cudaDeviceSynchronize();
    gettimeofday(&te, 0);
    printf("histogram time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec);
#endif

    // exclusive scan to produce cdf
    blockSize = 1024;
    gridSize = (numBins + blockSize - 1)/blockSize;
    assert(gridSize <= blockSize);
#if DEBUG == 1
    printf("scan: grid: %d, block: %d, length: %lu\n", gridSize, blockSize, numBins);
    gettimeofday(&tb, 0);
#endif
    blelloch_scan_kernel<unsigned int, addF<unsigned int> ><<<gridSize, blockSize, blockSize * 2 * sizeof(unsigned int)>>>(d_cdf, d_buf2, numBins, 0);
    checkCudaErrors(cudaGetLastError());
    if (gridSize > 1) {
#if DEBUG == 1
        printf("scan: grid: %d, block: %d, length: %d\n", (gridSize+blockSize-1)/blockSize, blockSize, gridSize);
#endif
        blelloch_scan_kernel<unsigned int, addF<unsigned int> ><<<(gridSize+blockSize-1)/blockSize, blockSize, blockSize * 2 * sizeof(unsigned int)>>>(d_buf2, NULL, gridSize, 0);
        checkCudaErrors(cudaGetLastError());
#if DEBUG == 1
        printf("scan_update: grid: %d, block: %d\n", gridSize, blockSize);
#endif
        scan_update_kernel<unsigned int, addF<unsigned int> ><<<gridSize, blockSize>>>(d_cdf, d_buf2, numBins);
        checkCudaErrors(cudaGetLastError());
    }
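    /* Optional sanity check on the result (an illustrative addition, not part of
       the original assignment): an exclusive prefix sum of non-negative counts
       starts at 0 and is non-decreasing.  When DEBUG is enabled, the extra
       device-to-host copy slightly inflates the "scan time" printed below. */
#if DEBUG == 1
    {
        unsigned int* h_cdf_check = new unsigned int[numBins];
        checkCudaErrors(cudaMemcpy(h_cdf_check, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost));
        assert(h_cdf_check[0] == 0);
        for (size_t i = 1; i < numBins; ++i) {
            assert(h_cdf_check[i-1] <= h_cdf_check[i]);
        }
        delete[] h_cdf_check;
    }
#endif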
#if DEBUG == 1
    cudaDeviceSynchronize();
    gettimeofday(&te, 0);
    printf("scan time: %ld\n", (te.tv_sec-tb.tv_sec)*1000000 + te.tv_usec-tb.tv_usec);
#endif

    checkCudaErrors(cudaFree(d_buf1));
    checkCudaErrors(cudaFree(d_buf2));
}