This table pairs each hipified source file with the original CUDA file it was generated from. All four columns are strings; the figures below give the minimum and maximum string lengths observed per column:

  hip_filename    5 to 84 characters
  hip_content     79 to 9.69M characters
  cuda_filename   4 to 83 characters
  cuda_content    19 to 9.69M characters
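For orientation, here is a minimal C++ sketch of what one row of this table holds. The struct and its field names are hypothetical and simply mirror the four columns listed above; they are not part of any loader or published API.

#include <string>

// Hypothetical layout of a single record (one row of the table below).
struct HipifyPair {
    std::string hip_filename;   // name of the hipified file, typically "*.hip"
    std::string hip_content;    // full HIP source emitted by hipify
    std::string cuda_filename;  // name of the original file, typically "*.cu"
    std::string cuda_content;   // original CUDA source
};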
55584905dc19e7f182545f2795ce12a6a1d477f3.hip
// !!! This is a file automatically generated by hipify!!! //a######################################################### //a## 3D iso acoustic fd :MPI + CUDA //a## code by Rong Tao //a######################################################### #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> __device__ volatile int vint = 0; #define PI 3.141592653 #define BlockSize1 16// tile size in 1st-axis #define BlockSize2 16// tile size in 2nd-axis #define mm 4 // half of the order in space #define npd 50 // absorbing boundry condition wield void check_gpu_error (const char *msg) /*< check GPU errors >*/ { hipError_t err = hipGetLastError (); if (hipSuccess != err) { printf("Cuda error: %s: %s\n", msg, hipGetErrorString (err)); exit(0); } } __constant__ float stencil[mm+1]={-205.0/72.0,8.0/5.0,-1.0/5.0,8.0/315.0,-1.0/560.0}; __global__ void cuda_ricker_wavelet(float *wlt, float fm, float dt, int nt) /*< generate ricker wavelet with time deley >*/ { int it=threadIdx.x+blockDim.x*blockIdx.x; if (it<nt){ float tmp = PI*fm*fabsf(it*dt-1.0/fm);//delay the wavelet to exhibit all waveform tmp *=tmp; wlt[it]= (1.0-2.0*tmp)*expf(-tmp);// ricker wavelet at time: t=nt*dt } } __global__ void cuda_set_s(int *szxy, int szbeg, int sxbeg, int sybeg, int jsz, int jsx, int jsy, int ns, int nz, int nx, int ny) /*< set the positions of sources in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; if (id<ns) szxy[id]=(szbeg+id*jsz+mm+npd)+nnz*(sxbeg+id*jsx+mm+npd)+nnz*nnx*(sybeg+id*jsy+mm+npd); } __global__ void cuda_set_g(int *gzxy, int ng, int nz, int nx, int ny) /*< set the positions of geophones in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; int iy=id/nx; int ix=id%nx; if (id<ng) gzxy[id]=(mm+npd)+nnz*(ix*1+mm+npd)+nnz*nnx*(iy*1+mm+npd); } __global__ void cuda_trans_xy2txy(float *xy, float *txy, int it, int nt, int ng) /*< set the positions of geophones in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) txy[it+id*nt]+=xy[id]; } __global__ void cuda_absorb_bndr(float *d_p,int nz,int nx,int ny,float qp) /*< absorb boundry condition >*/ { const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix int id,iy; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; int nny=ny+2*mm+2*npd; for(iy=0;iy<nny;iy++) { id=iz+ix*nnz+iy*nnz*nnx; /*< front & back (0<y<ny) >*/ if ( iy < npd ) d_p[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*d_p[id]; else if ( iy >= 2*mm + npd + ny ) d_p[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*d_p[id]; /*< left & right (0<x<nx) >*/ if ( ix < npd ) d_p[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*d_p[id]; else if ( ix >= 2*mm + npd + nx ) d_p[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*d_p[id]; /*< up & down (0<z<nz) >*/ if ( iz < npd ) d_p[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*d_p[id]; else if ( iz >= 2*mm + npd + nz ) d_p[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*d_p[id]; } } __global__ void cuda_record(float *p, float *seis, int *gxz, int ng)//++++++++++++ /*< record the seismogram at time it >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) seis[id]=p[gxz[id]]; } __global__ void cuda_add_source(bool add, float *p, float *source, int *szxy, int ns) /*< add/subtract sources: length of source[]=ns, index stored in szxy[] >*/ { int id=threadIdx.x+blockIdx.x*blockDim.x; if(id<ns){ if(add){ 
p[szxy[id]]+=source[id]; }else{ p[szxy[id]]-=source[id]; } } } __global__ void cuda_step_fd3d(float *p0, float *p1, float *vv, float _dz2, float _dx2, float _dy2, int n1, int n2, int n3) /*< step forward: 3-D FD, order=8 >*/ { bool validr = true; bool validw = true; const int gtid1 = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz const int gtid2 = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix const int ltid1 = threadIdx.x;//ithreadz const int ltid2 = threadIdx.y;//ithreadx const int work1 = blockDim.x;//nblockz const int work2 = blockDim.y;//nblockx __shared__ float tile[BlockSize2 + 2 * mm][BlockSize1 + 2 * mm];//tile[16+2*mm][16+2*mm] const int stride2 = n1 + 2 * mm + 2 * npd;//n1=nz const int stride3 = stride2 * (n2 + 2 * mm + 2 * npd);//n2=nx stride3=(nz+2*mm)*(nx+2*mm) int inIndex = 0; int outIndex = 0; // Advance inputIndex to start of inner volume inIndex += (mm ) * stride2 + mm ;// inIndex=mm*(nz+2*mm+2*npd)+mm; // Advance inputIndex to target element inIndex += gtid2 * stride2 + gtid1; // inIndex=mm*(nz+2*mm)+mm+ix*(nz+2*mm+2*npd)+iz;:igrid float infront[mm]; float behind[mm]; float current; const int t1 = ltid1 + mm; const int t2 = ltid2 + mm; // Check in bounds if ((gtid1 >= n1 + mm + 2*npd) ||(gtid2 >= n2 + mm + 2*npd)) validr = false; if ((gtid1 >= n1 + 2*npd) ||(gtid2 >= n2 + 2*npd)) validw = false; // Preload the "infront" and "behind" data for (int i = mm -2 ; i >= 0 ; i--)//change 'mm-2' to 'mm-1'+++++++++++++++++++ { if (validr) behind[i] = p1[inIndex]; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) } if (validr) current = p1[inIndex]; outIndex = inIndex; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) for (int i = 0 ; i < mm ; i++) { if (validr) infront[i] = p1[inIndex]; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) } // Step through the zx-planes for (int i3 = mm ; i3 < n3 + 2*npd + mm ; i3++) { // Advance the slice (move the thread-front) for (int i = mm - 1 ; i > 0 ; i--) behind[i] = behind[i - 1]; behind[0] = current; current = infront[0]; for (int i = 0 ; i < mm - 1 ; i++) infront[i] = infront[i + 1]; if (validr) infront[mm - 1] = p1[inIndex]; inIndex += stride3; outIndex += stride3; __syncthreads(); // Update the data slice in the local tile // Halo above & below if (ltid2 < mm) { /* tile[ithread][ithread+mm]=p1[igrid - mm*(nz+2*mm)] */ tile[ltid2][t1] = p1[outIndex - mm * stride2];//t1 = ltid1 + mm; tile[ltid2 + work2 + mm][t1] = p1[outIndex + work2 * stride2]; } // Halo left & right if (ltid1 < mm) { tile[t2][ltid1] = p1[outIndex - mm]; tile[t2][ltid1 + work1 + mm] = p1[outIndex + work1]; } tile[t2][t1] = current; __syncthreads(); // Compute the output value float c1, c2, c3; c1=c2=c3=stencil[0]*current; for (int i=1; i <= mm ; i++) { c1 +=stencil[i]*(tile[t2][t1-i]+ tile[t2][t1+i]);//z c2 +=stencil[i]*(tile[t2-i][t1]+ tile[t2+i][t1]);//x c3 +=stencil[i]*(infront[i-1] + behind[i-1] ); //y } c1*=_dz2; c2*=_dx2; c3*=_dy2; if (validw) p0[outIndex]=2.0*p1[outIndex]-p0[outIndex]+vv[outIndex]*(c1+c2+c3); } } void velocity_transform(float *v0, float*vv, float dt, int n1, int n2, int n3) /*< velocit2 transform: vv=v0*dt; vv<--vv^2 >*/ { int i1, i2, i3, nn1, nn2, nn3; float tmp; nn1=n1+2*mm+2*npd; nn2=n2+2*mm+2*npd; nn3=n3+2*mm+2*npd; // inner zone for(i3=0; i3<n3; i3++){//y for(i2=0; i2<n2; i2++){//x for(i1=0; i1<n1; i1++){//z tmp=v0[i1+n1*i2+n1*n2*i3]*dt; vv[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]=tmp*tmp; } } } //top & down for(i3=0; i3<nn3; i3++){//y for(i2=0; i2<nn2; i2++){//x for (i1=0; i1<mm+npd; i1++){//z 
vv[i1+nn1*i2+nn1*nn2*i3]=vv[mm+npd+nn1*i2+nn1*nn2*i3]; vv[(nn1-i1-1)+nn1*i2+nn1*nn2*i3]=vv[(nn1-mm-npd-1)+nn1*i2+nn1*nn2*i3]; } } } //left & right for(i3=0; i3<nn3; i3++){//y for(i2=0; i2<mm+npd; i2++){//x for (i1=0; i1<nn1; i1++){//z vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*(mm+npd)+nn1*nn2*i3]; vv[i1+nn1*(nn2-i2-1)+nn1*nn2*i3]=vv[i1+nn1*(nn2-mm-npd-1)+nn1*nn2*i3]; } } } //front & back for(i3=0; i3<mm+npd; i3++){//y for(i2=0; i2<nn2; i2++){//x for(i1=0; i1<nn1; i1++){//z vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*i2+nn1*nn2*(mm+npd)]; vv[i1+nn1*i2+nn1*nn2*(nn3-1-i3)]=vv[i1+nn1*i2+nn1*nn2*(nn3-mm-npd-1)]; } } } } void window3d(float *a, float *b, int n1, int n2, int n3) /*< window a 3d subvolume >*/ { int i1, i2, i3, nn1, nn2; nn1=n1+2*mm+ 2*npd;//z nn2=n2+2*mm+ 2*npd;//x for(i3=0; i3<n3; i3++) for(i2=0; i2<n2; i2++) for(i1=0; i1<n1; i1++) { a[i1+n1*i2+n1*n2*i3]=b[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]; } } extern "C" void cuda_3dfd(FILE *fpvel, FILE *fpsnap, FILE *fpshot, int is, int ns, int myid, int nx, int ny, int nz, float dx, float dy, float dz, int sxbeg, int sybeg, int szbeg, int jsx, int jsy, int jsz, int nt, int kt, float dt, float fm) { int nnz, nnx, nny, it, ng; int *d_szxy,*d_gzxy; float _dz2, _dx2, _dy2; float *v0, *vv, *d_wlt, *d_vv, *d_p0, *d_p1, *ptr; float *d_dcal_device_xy,*d_dcal_device_txy,*d_dcal_host; clock_t t0, t1; t0 = clock(); _dz2=1.0/(dz*dz); _dx2=1.0/(dx*dx); _dy2=1.0/(dy*dy); nnz=nz+2*mm+2*npd; nnx=nx+2*mm+2*npd; nny=ny+2*mm+2*npd; ng=nx*ny; v0=(float*)malloc(nz*nx*ny*sizeof(float)); vv=(float*)malloc(nnz*nnx*nny*sizeof(float)); d_dcal_host=(float*)malloc(ng*nt*sizeof(float)); fread(v0, sizeof(float), nz*nx*ny, fpvel); velocity_transform(v0, vv, dt, nz, nx, ny); hipSetDevice(0); check_gpu_error("CUDA:Failed to initialize device!"); dim3 dimg, dimb; dimg.x=(nz+2*npd+2*mm+BlockSize1-1)/BlockSize1; dimg.y=(nx+2*npd+2*mm+BlockSize2-1)/BlockSize2; dimb.x=BlockSize1; dimb.y=BlockSize2; /* allocate memory on device */ hipMalloc(&d_wlt, nt*sizeof(float)); hipMalloc(&d_vv, nnz*nnx*nny*sizeof(float)); hipMalloc(&d_p0, nnz*nnx*nny*sizeof(float)); hipMalloc(&d_p1, nnz*nnx*nny*sizeof(float)); hipMalloc(&d_szxy, ns*sizeof(int)); hipMalloc(&d_gzxy, ng*sizeof(int)); hipMalloc(&d_dcal_device_xy, ng*sizeof(float)); hipMalloc(&d_dcal_device_txy, ng*nt*sizeof(float)); check_gpu_error("CUDA: Failed to allocate memory for variables!"); hipLaunchKernelGGL(( cuda_ricker_wavelet), dim3((nt+511)/512), dim3(512), 0, 0, d_wlt, fm, dt, nt); hipMemcpy(d_vv, vv, nnz*nnx*nny*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_set_s), dim3(1), dim3(ns), 0, 0, d_szxy, szbeg, sxbeg, sybeg, jsz, jsx, jsy, ns, nz, nx, ny); hipLaunchKernelGGL(( cuda_set_g), dim3((ng+511)/512),dim3(512), 0, 0, d_gzxy, ng, nz, nx, ny); hipMemset(d_p0, 0, nnz*nnx*nny*sizeof(float)); hipMemset(d_p1, 0, nnz*nnx*nny*sizeof(float)); hipMemset(d_dcal_device_xy, 0, ng*sizeof(float)); hipMemset(d_dcal_device_txy, 0, ng*nt*sizeof(float)); if(myid==0)printf(" cuda: is=%2d << it= 0",is); for(it=0; it<nt; it++) { hipLaunchKernelGGL(( cuda_add_source), dim3(1),dim3(1), 0, 0, true, d_p1, &d_wlt[it], &d_szxy[is], 1); hipLaunchKernelGGL(( cuda_step_fd3d), dim3(dimg),dim3(dimb), 0, 0, d_p0, d_p1, d_vv, _dz2, _dx2, _dy2, nz, nx, ny); ptr=d_p0; d_p0=d_p1; d_p1=ptr; hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, d_p0, nz, nx, ny, -0.25); hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, d_p1, nz, nx, ny, -0.25); hipLaunchKernelGGL(( cuda_record), dim3((ng+511)/512), dim3(512), 0, 0, 
d_p0, d_dcal_device_xy, d_gzxy, ng); hipLaunchKernelGGL(( cuda_trans_xy2txy), dim3((ng+511)/512), dim3(512), 0, 0, d_dcal_device_xy, d_dcal_device_txy, it, nt, ng); /* if(it==kt&&is==0){ hipMemcpy(vv, d_p0, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToHost); window3d(v0, vv, nz, nx, ny); fwrite(v0, sizeof(float),nz*nx*ny, fpsnap); }*/ if(it%200==0&&myid==0) printf("-%d",it); } hipMemcpy(d_dcal_host, d_dcal_device_txy, ng*nt*sizeof(float), hipMemcpyDeviceToHost); fseek(fpshot,is*ng*nt*sizeof(float),0); fwrite(d_dcal_host, sizeof(float), ng*nt, fpshot); t1 = clock(); if(myid==0)printf(" >> %.3f (s)\n", ((float)(t1-t0))/CLOCKS_PER_SEC); /* free memory on device */ hipFree(d_wlt); hipFree(d_vv); hipFree(d_p0); hipFree(d_p1); hipFree(d_szxy); hipFree(d_gzxy); hipFree(d_dcal_device_xy); hipFree(d_dcal_device_txy); free(v0); free(vv); free(d_dcal_host); }
55584905dc19e7f182545f2795ce12a6a1d477f3.cu
//a######################################################### //a## 3D iso acoustic fd :MPI + CUDA //a## code by Rong Tao //a######################################################### #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cuda.h> __device__ volatile int vint = 0; #define PI 3.141592653 #define BlockSize1 16// tile size in 1st-axis #define BlockSize2 16// tile size in 2nd-axis #define mm 4 // half of the order in space #define npd 50 // absorbing boundry condition wield void check_gpu_error (const char *msg) /*< check GPU errors >*/ { cudaError_t err = cudaGetLastError (); if (cudaSuccess != err) { printf("Cuda error: %s: %s\n", msg, cudaGetErrorString (err)); exit(0); } } __constant__ float stencil[mm+1]={-205.0/72.0,8.0/5.0,-1.0/5.0,8.0/315.0,-1.0/560.0}; __global__ void cuda_ricker_wavelet(float *wlt, float fm, float dt, int nt) /*< generate ricker wavelet with time deley >*/ { int it=threadIdx.x+blockDim.x*blockIdx.x; if (it<nt){ float tmp = PI*fm*fabsf(it*dt-1.0/fm);//delay the wavelet to exhibit all waveform tmp *=tmp; wlt[it]= (1.0-2.0*tmp)*expf(-tmp);// ricker wavelet at time: t=nt*dt } } __global__ void cuda_set_s(int *szxy, int szbeg, int sxbeg, int sybeg, int jsz, int jsx, int jsy, int ns, int nz, int nx, int ny) /*< set the positions of sources in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; if (id<ns) szxy[id]=(szbeg+id*jsz+mm+npd)+nnz*(sxbeg+id*jsx+mm+npd)+nnz*nnx*(sybeg+id*jsy+mm+npd); } __global__ void cuda_set_g(int *gzxy, int ng, int nz, int nx, int ny) /*< set the positions of geophones in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; int iy=id/nx; int ix=id%nx; if (id<ng) gzxy[id]=(mm+npd)+nnz*(ix*1+mm+npd)+nnz*nnx*(iy*1+mm+npd); } __global__ void cuda_trans_xy2txy(float *xy, float *txy, int it, int nt, int ng) /*< set the positions of geophones in whole domain >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) txy[it+id*nt]+=xy[id]; } __global__ void cuda_absorb_bndr(float *d_p,int nz,int nx,int ny,float qp) /*< absorb boundry condition >*/ { const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix int id,iy; int nnz=nz+2*mm+2*npd; int nnx=nx+2*mm+2*npd; int nny=ny+2*mm+2*npd; for(iy=0;iy<nny;iy++) { id=iz+ix*nnz+iy*nnz*nnx; /*< front & back (0<y<ny) >*/ if ( iy < npd ) d_p[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*d_p[id]; else if ( iy >= 2*mm + npd + ny ) d_p[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*d_p[id]; /*< left & right (0<x<nx) >*/ if ( ix < npd ) d_p[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*d_p[id]; else if ( ix >= 2*mm + npd + nx ) d_p[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*d_p[id]; /*< up & down (0<z<nz) >*/ if ( iz < npd ) d_p[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*d_p[id]; else if ( iz >= 2*mm + npd + nz ) d_p[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*d_p[id]; } } __global__ void cuda_record(float *p, float *seis, int *gxz, int ng)//++++++++++++ /*< record the seismogram at time it >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) seis[id]=p[gxz[id]]; } __global__ void cuda_add_source(bool add, float *p, float *source, int *szxy, int ns) /*< add/subtract sources: length of source[]=ns, index stored in szxy[] >*/ { int id=threadIdx.x+blockIdx.x*blockDim.x; if(id<ns){ if(add){ p[szxy[id]]+=source[id]; }else{ p[szxy[id]]-=source[id]; } } } __global__ void 
cuda_step_fd3d(float *p0, float *p1, float *vv, float _dz2, float _dx2, float _dy2, int n1, int n2, int n3) /*< step forward: 3-D FD, order=8 >*/ { bool validr = true; bool validw = true; const int gtid1 = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz const int gtid2 = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix const int ltid1 = threadIdx.x;//ithreadz const int ltid2 = threadIdx.y;//ithreadx const int work1 = blockDim.x;//nblockz const int work2 = blockDim.y;//nblockx __shared__ float tile[BlockSize2 + 2 * mm][BlockSize1 + 2 * mm];//tile[16+2*mm][16+2*mm] const int stride2 = n1 + 2 * mm + 2 * npd;//n1=nz const int stride3 = stride2 * (n2 + 2 * mm + 2 * npd);//n2=nx stride3=(nz+2*mm)*(nx+2*mm) int inIndex = 0; int outIndex = 0; // Advance inputIndex to start of inner volume inIndex += (mm ) * stride2 + mm ;// inIndex=mm*(nz+2*mm+2*npd)+mm; // Advance inputIndex to target element inIndex += gtid2 * stride2 + gtid1; // inIndex=mm*(nz+2*mm)+mm+ix*(nz+2*mm+2*npd)+iz;:igrid float infront[mm]; float behind[mm]; float current; const int t1 = ltid1 + mm; const int t2 = ltid2 + mm; // Check in bounds if ((gtid1 >= n1 + mm + 2*npd) ||(gtid2 >= n2 + mm + 2*npd)) validr = false; if ((gtid1 >= n1 + 2*npd) ||(gtid2 >= n2 + 2*npd)) validw = false; // Preload the "infront" and "behind" data for (int i = mm -2 ; i >= 0 ; i--)//change 'mm-2' to 'mm-1'+++++++++++++++++++ { if (validr) behind[i] = p1[inIndex]; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) } if (validr) current = p1[inIndex]; outIndex = inIndex; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) for (int i = 0 ; i < mm ; i++) { if (validr) infront[i] = p1[inIndex]; inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm) } // Step through the zx-planes for (int i3 = mm ; i3 < n3 + 2*npd + mm ; i3++) { // Advance the slice (move the thread-front) for (int i = mm - 1 ; i > 0 ; i--) behind[i] = behind[i - 1]; behind[0] = current; current = infront[0]; for (int i = 0 ; i < mm - 1 ; i++) infront[i] = infront[i + 1]; if (validr) infront[mm - 1] = p1[inIndex]; inIndex += stride3; outIndex += stride3; __syncthreads(); // Update the data slice in the local tile // Halo above & below if (ltid2 < mm) { /* tile[ithread][ithread+mm]=p1[igrid - mm*(nz+2*mm)] */ tile[ltid2][t1] = p1[outIndex - mm * stride2];//t1 = ltid1 + mm; tile[ltid2 + work2 + mm][t1] = p1[outIndex + work2 * stride2]; } // Halo left & right if (ltid1 < mm) { tile[t2][ltid1] = p1[outIndex - mm]; tile[t2][ltid1 + work1 + mm] = p1[outIndex + work1]; } tile[t2][t1] = current; __syncthreads(); // Compute the output value float c1, c2, c3; c1=c2=c3=stencil[0]*current; for (int i=1; i <= mm ; i++) { c1 +=stencil[i]*(tile[t2][t1-i]+ tile[t2][t1+i]);//z c2 +=stencil[i]*(tile[t2-i][t1]+ tile[t2+i][t1]);//x c3 +=stencil[i]*(infront[i-1] + behind[i-1] ); //y } c1*=_dz2; c2*=_dx2; c3*=_dy2; if (validw) p0[outIndex]=2.0*p1[outIndex]-p0[outIndex]+vv[outIndex]*(c1+c2+c3); } } void velocity_transform(float *v0, float*vv, float dt, int n1, int n2, int n3) /*< velocit2 transform: vv=v0*dt; vv<--vv^2 >*/ { int i1, i2, i3, nn1, nn2, nn3; float tmp; nn1=n1+2*mm+2*npd; nn2=n2+2*mm+2*npd; nn3=n3+2*mm+2*npd; // inner zone for(i3=0; i3<n3; i3++){//y for(i2=0; i2<n2; i2++){//x for(i1=0; i1<n1; i1++){//z tmp=v0[i1+n1*i2+n1*n2*i3]*dt; vv[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]=tmp*tmp; } } } //top & down for(i3=0; i3<nn3; i3++){//y for(i2=0; i2<nn2; i2++){//x for (i1=0; i1<mm+npd; i1++){//z vv[i1+nn1*i2+nn1*nn2*i3]=vv[mm+npd+nn1*i2+nn1*nn2*i3]; 
vv[(nn1-i1-1)+nn1*i2+nn1*nn2*i3]=vv[(nn1-mm-npd-1)+nn1*i2+nn1*nn2*i3]; } } } //left & right for(i3=0; i3<nn3; i3++){//y for(i2=0; i2<mm+npd; i2++){//x for (i1=0; i1<nn1; i1++){//z vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*(mm+npd)+nn1*nn2*i3]; vv[i1+nn1*(nn2-i2-1)+nn1*nn2*i3]=vv[i1+nn1*(nn2-mm-npd-1)+nn1*nn2*i3]; } } } //front & back for(i3=0; i3<mm+npd; i3++){//y for(i2=0; i2<nn2; i2++){//x for(i1=0; i1<nn1; i1++){//z vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*i2+nn1*nn2*(mm+npd)]; vv[i1+nn1*i2+nn1*nn2*(nn3-1-i3)]=vv[i1+nn1*i2+nn1*nn2*(nn3-mm-npd-1)]; } } } } void window3d(float *a, float *b, int n1, int n2, int n3) /*< window a 3d subvolume >*/ { int i1, i2, i3, nn1, nn2; nn1=n1+2*mm+ 2*npd;//z nn2=n2+2*mm+ 2*npd;//x for(i3=0; i3<n3; i3++) for(i2=0; i2<n2; i2++) for(i1=0; i1<n1; i1++) { a[i1+n1*i2+n1*n2*i3]=b[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]; } } extern "C" void cuda_3dfd(FILE *fpvel, FILE *fpsnap, FILE *fpshot, int is, int ns, int myid, int nx, int ny, int nz, float dx, float dy, float dz, int sxbeg, int sybeg, int szbeg, int jsx, int jsy, int jsz, int nt, int kt, float dt, float fm) { int nnz, nnx, nny, it, ng; int *d_szxy,*d_gzxy; float _dz2, _dx2, _dy2; float *v0, *vv, *d_wlt, *d_vv, *d_p0, *d_p1, *ptr; float *d_dcal_device_xy,*d_dcal_device_txy,*d_dcal_host; clock_t t0, t1; t0 = clock(); _dz2=1.0/(dz*dz); _dx2=1.0/(dx*dx); _dy2=1.0/(dy*dy); nnz=nz+2*mm+2*npd; nnx=nx+2*mm+2*npd; nny=ny+2*mm+2*npd; ng=nx*ny; v0=(float*)malloc(nz*nx*ny*sizeof(float)); vv=(float*)malloc(nnz*nnx*nny*sizeof(float)); d_dcal_host=(float*)malloc(ng*nt*sizeof(float)); fread(v0, sizeof(float), nz*nx*ny, fpvel); velocity_transform(v0, vv, dt, nz, nx, ny); cudaSetDevice(0); check_gpu_error("CUDA:Failed to initialize device!"); dim3 dimg, dimb; dimg.x=(nz+2*npd+2*mm+BlockSize1-1)/BlockSize1; dimg.y=(nx+2*npd+2*mm+BlockSize2-1)/BlockSize2; dimb.x=BlockSize1; dimb.y=BlockSize2; /* allocate memory on device */ cudaMalloc(&d_wlt, nt*sizeof(float)); cudaMalloc(&d_vv, nnz*nnx*nny*sizeof(float)); cudaMalloc(&d_p0, nnz*nnx*nny*sizeof(float)); cudaMalloc(&d_p1, nnz*nnx*nny*sizeof(float)); cudaMalloc(&d_szxy, ns*sizeof(int)); cudaMalloc(&d_gzxy, ng*sizeof(int)); cudaMalloc(&d_dcal_device_xy, ng*sizeof(float)); cudaMalloc(&d_dcal_device_txy, ng*nt*sizeof(float)); check_gpu_error("CUDA: Failed to allocate memory for variables!"); cuda_ricker_wavelet<<<(nt+511)/512, 512>>>(d_wlt, fm, dt, nt); cudaMemcpy(d_vv, vv, nnz*nnx*nny*sizeof(float), cudaMemcpyHostToDevice); cuda_set_s<<<1, ns>>>(d_szxy, szbeg, sxbeg, sybeg, jsz, jsx, jsy, ns, nz, nx, ny); cuda_set_g<<<(ng+511)/512,512>>>(d_gzxy, ng, nz, nx, ny); cudaMemset(d_p0, 0, nnz*nnx*nny*sizeof(float)); cudaMemset(d_p1, 0, nnz*nnx*nny*sizeof(float)); cudaMemset(d_dcal_device_xy, 0, ng*sizeof(float)); cudaMemset(d_dcal_device_txy, 0, ng*nt*sizeof(float)); if(myid==0)printf(" cuda: is=%2d << it= 0",is); for(it=0; it<nt; it++) { cuda_add_source<<<1,1>>>(true, d_p1, &d_wlt[it], &d_szxy[is], 1); cuda_step_fd3d<<<dimg,dimb>>>(d_p0, d_p1, d_vv, _dz2, _dx2, _dy2, nz, nx, ny); ptr=d_p0; d_p0=d_p1; d_p1=ptr; cuda_absorb_bndr<<<dimg,dimb>>>(d_p0, nz, nx, ny, -0.25); cuda_absorb_bndr<<<dimg,dimb>>>(d_p1, nz, nx, ny, -0.25); cuda_record<<<(ng+511)/512, 512>>>(d_p0, d_dcal_device_xy, d_gzxy, ng); cuda_trans_xy2txy<<<(ng+511)/512, 512>>>(d_dcal_device_xy, d_dcal_device_txy, it, nt, ng); /* if(it==kt&&is==0){ cudaMemcpy(vv, d_p0, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToHost); window3d(v0, vv, nz, nx, ny); fwrite(v0, sizeof(float),nz*nx*ny, fpsnap); }*/ if(it%200==0&&myid==0) 
printf("-%d",it); } cudaMemcpy(d_dcal_host, d_dcal_device_txy, ng*nt*sizeof(float), cudaMemcpyDeviceToHost); fseek(fpshot,is*ng*nt*sizeof(float),0); fwrite(d_dcal_host, sizeof(float), ng*nt, fpshot); t1 = clock(); if(myid==0)printf(" >> %.3f (s)\n", ((float)(t1-t0))/CLOCKS_PER_SEC); /* free memory on device */ cudaFree(d_wlt); cudaFree(d_vv); cudaFree(d_p0); cudaFree(d_p1); cudaFree(d_szxy); cudaFree(d_gzxy); cudaFree(d_dcal_device_xy); cudaFree(d_dcal_device_txy); free(v0); free(vv); free(d_dcal_host); }
e4f14c3a01993f427782f76bfed4f0fe1e9c6f46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0) __device__ int IE_d; __device__ int JE_d; __device__ float cb_d; __global__ void ezCalc ( float *ez, float *hx, float *hy ) { //float cb = 133.105; //int IE = 10, JE = 10; int i = threadIdx.x, j = blockIdx.x; if (j == 0) { // at x=0 if (i == 0 || i == IE_d - 1) // at x=0,y=0 ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]); } else { if (i == 0 || i == IE_d - 1) ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]); } } int main(int argc, char * argv[]) { int IE, JE, nsteps; int i, j, n, is, jc, y, k; int xstart; float pi = 3.141592653589793238462643; float * ez, * hx, * hy; float * ez_d, *hx_d, * hy_d; float dx, dt, epsz, mu, courant, cb, db, c, x, t, lambda, freq; int tStart, tTotal; int size; FILE * fp; if (argc != 4) { printf("Invalid arguments... please type:\n"); printf(" %s IE JE steps\n", argv[0]); exit(0); } IE = atoi(argv[1]); JE = atoi(argv[2]); nsteps = atoi(argv[3]); printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps); struct timespec Begin, Step1, Step2, End; double diff, accum; is = 10; epsz = 8.854e-12; mu = 4.0 * pi * 1.0e-7; c = 3.0e8; courant = 0.5; dx = 0.001; dt = (courant * dx) / (sqrt(2) * c); cb = dt / (epsz * dx); db = dt / (mu * dx); printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db); size = IE * JE; ez = (float * ) calloc(size, sizeof(float)); hx = (float * ) calloc(size, sizeof(float)); hy = (float * ) calloc(size, sizeof(float)); hipMalloc( (void **) &ez_d, size * sizeof(float)); hipMalloc( (void **) &hx_d, size * sizeof(float)); hipMalloc( (void **) &hy_d, size * sizeof(float)); hipMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(JE_d, &JE, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(IE_d, &IE, sizeof(float), 0, hipMemcpyHostToDevice); freq = 50e9; accum = 0.0; for (n = 0; n < nsteps; n++) { // TIME if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) { perror("Error in gettime"); exit(1); } hipMemcpy( ez_d, ez, size * sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( hx_d, hx, size * sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( hy_d, hy, size * sizeof(float), hipMemcpyHostToDevice ); //Calculate the Ez field hipLaunchKernelGGL(( ezCalc), dim3(JE), dim3(IE), 0, 0, ez_d, hx_d, hy_d ); hipMemcpy( ez, ez_d, size * sizeof(float), hipMemcpyDeviceToHost ); clock_gettime(CLOCK_REALTIME, &Step1); for (j = 0; j < JE; j++) { // x dimension ez[j * IE + is] = cos(2 * pi * freq * n * dt); } clock_gettime(CLOCK_REALTIME, &Step2); //Calculate the H field for (j = 0; j < JE; j++) { for (i = 0; i < IE; i++) { // y dimension // Periodic Boundary implementation if (j + 1 == JE) hx[j * IE + i] = hx[j * IE + i] + db * (ez[j * IE + i] - ez[i]); else hx[j * IE + i] = hx[j * IE + i] + db * (ez[j * IE + i] - ez[(j + 1) * JE + i]); if (i == IE - 1) hy[j * JE + i] = hy[j * JE + i] + db * (0 - ez[j * JE + i]); else hy[j * JE + i] = hy[j * JE + i] + db * (ez[j * JE + (i + 1)] - ez[j * JE + i]); } } //Hx calculation if (clock_gettime(CLOCK_REALTIME, &End) == -1) { perror("Error in 
gettime"); exit(1); } diff = GET_TIME(Begin, End); accum += diff; printf("\n====Iteration (%d)====", n); printf("\n======Total time: (%f)====", diff); printf("\n======Part 1: (%f)====", GET_TIME(Begin, Step1)); printf("\n======Part 2: (%f)====", GET_TIME(Step1, Step2)); printf("\n======Part 3: (%f)====", GET_TIME(Step2, End)); } printf("\n\n====Total time: %f\n", accum); // write output to file fp = fopen("output_gpu_v2.txt", "w"); fprintf(fp, "==================== Ez MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", ez[i]); } fprintf(fp, "==================== Hx MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hx[i]); } fprintf(fp, "==================== Hy MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hy[i]); } free(ez); free(hy); free(hx); hipFree( ez_d ); hipFree( hx_d ); hipFree( hy_d ); return 0; }
e4f14c3a01993f427782f76bfed4f0fe1e9c6f46.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0) __device__ int IE_d; __device__ int JE_d; __device__ float cb_d; __global__ void ezCalc ( float *ez, float *hx, float *hy ) { //float cb = 133.105; //int IE = 10, JE = 10; int i = threadIdx.x, j = blockIdx.x; if (j == 0) { // at x=0 if (i == 0 || i == IE_d - 1) // at x=0,y=0 ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]); } else { if (i == 0 || i == IE_d - 1) ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]); } } int main(int argc, char * argv[]) { int IE, JE, nsteps; int i, j, n, is, jc, y, k; int xstart; float pi = 3.141592653589793238462643; float * ez, * hx, * hy; float * ez_d, *hx_d, * hy_d; float dx, dt, epsz, mu, courant, cb, db, c, x, t, lambda, freq; int tStart, tTotal; int size; FILE * fp; if (argc != 4) { printf("Invalid arguments... please type:\n"); printf(" %s IE JE steps\n", argv[0]); exit(0); } IE = atoi(argv[1]); JE = atoi(argv[2]); nsteps = atoi(argv[3]); printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps); struct timespec Begin, Step1, Step2, End; double diff, accum; is = 10; epsz = 8.854e-12; mu = 4.0 * pi * 1.0e-7; c = 3.0e8; courant = 0.5; dx = 0.001; dt = (courant * dx) / (sqrt(2) * c); cb = dt / (epsz * dx); db = dt / (mu * dx); printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db); size = IE * JE; ez = (float * ) calloc(size, sizeof(float)); hx = (float * ) calloc(size, sizeof(float)); hy = (float * ) calloc(size, sizeof(float)); cudaMalloc( (void **) &ez_d, size * sizeof(float)); cudaMalloc( (void **) &hx_d, size * sizeof(float)); cudaMalloc( (void **) &hy_d, size * sizeof(float)); cudaMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(JE_d, &JE, sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(IE_d, &IE, sizeof(float), 0, cudaMemcpyHostToDevice); freq = 50e9; accum = 0.0; for (n = 0; n < nsteps; n++) { // TIME if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) { perror("Error in gettime"); exit(1); } cudaMemcpy( ez_d, ez, size * sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( hx_d, hx, size * sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( hy_d, hy, size * sizeof(float), cudaMemcpyHostToDevice ); //Calculate the Ez field ezCalc<<<JE, IE>>>( ez_d, hx_d, hy_d ); cudaMemcpy( ez, ez_d, size * sizeof(float), cudaMemcpyDeviceToHost ); clock_gettime(CLOCK_REALTIME, &Step1); for (j = 0; j < JE; j++) { // x dimension ez[j * IE + is] = cos(2 * pi * freq * n * dt); } clock_gettime(CLOCK_REALTIME, &Step2); //Calculate the H field for (j = 0; j < JE; j++) { for (i = 0; i < IE; i++) { // y dimension // Periodic Boundary implementation if (j + 1 == JE) hx[j * IE + i] = hx[j * IE + i] + db * (ez[j * IE + i] - ez[i]); else hx[j * IE + i] = hx[j * IE + i] + db * (ez[j * IE + i] - ez[(j + 1) * JE + i]); if (i == IE - 1) hy[j * JE + i] = hy[j * JE + i] + db * (0 - ez[j * JE + i]); else hy[j * JE + i] = hy[j * JE + i] + db * (ez[j * JE + (i + 1)] - ez[j * JE + i]); } } //Hx calculation if (clock_gettime(CLOCK_REALTIME, &End) == -1) { perror("Error in gettime"); exit(1); } diff = GET_TIME(Begin, End); accum += diff; printf("\n====Iteration (%d)====", n); 
printf("\n======Total time: (%f)====", diff); printf("\n======Part 1: (%f)====", GET_TIME(Begin, Step1)); printf("\n======Part 2: (%f)====", GET_TIME(Step1, Step2)); printf("\n======Part 3: (%f)====", GET_TIME(Step2, End)); } printf("\n\n====Total time: %f\n", accum); // write output to file fp = fopen("output_gpu_v2.txt", "w"); fprintf(fp, "==================== Ez MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", ez[i]); } fprintf(fp, "==================== Hx MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hx[i]); } fprintf(fp, "==================== Hy MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hy[i]); } free(ez); free(hy); free(hx); cudaFree( ez_d ); cudaFree( hx_d ); cudaFree( hy_d ); return 0; }
29e35ba2b16643fba9ff94231c68d71ff4f272e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <[email protected]> #include <stdio.h> #define SPMV_VARIANT "scalar" #include "spmv.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "timer.h" //////////////////////////////////////////////////////////////////////// // CSR SpMV kernels based on a scalar model (one thread per row) /////////////////////////////////////////////////////////////////////// // // spmv_csr_scalar_device // Straightforward translation of standard CSR SpMV to CUDA // where each thread computes y[i] += A[i,:] * x // (the dot product of the i-th row of A with the x vector) // // spmv_csr_scalar_tex_device // Same as spmv_csr_scalar_device, except x is accessed via texture cache. // __global__ void spmv_csr_scalar_kernel(const int num_rows, const int * Ap, const int * Aj, const ValueType * Ax, const ValueType * x, ValueType * y, float * value, int * block, int * row_start) { int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < num_rows / 2) { float d0 = y[2*row]; float d1 = y[2*row+1]; int row_begin = row_start[row]; int row_end = row_start[row+1]; for(int j = row_begin; j < row_end; j ++){ d0 += value[j * 4 + 0]*x[2*block[j]+0]; d0 += value[j * 4 + 1]*x[2*block[j]+1]; d1 += value[j * 4 + 2]*x[2*block[j]+0]; d1 += value[j * 4 + 3]*x[2*block[j]+1]; } y[2*row] = d0; y[2*row+1] = d1; } } /* __global__ void spmv_csr_scalar_kernel(const int num_rows, const int * Ap, const int * Aj, const ValueType * Ax, const ValueType * x, ValueType * y) { ; int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < num_rows) { for(int i = 0; i < num_rows; i ++) B[i*num_rows + row] = 0.0; } if(row < num_rows) { for(int j = 0; j < num_rows; j ++) for(int offset = Ap[row]; offset < Ap[row+1]; offset ++) B[j*num_rows + Aj[offset]] = Ax[offset]; } //for (int i = 0; i < num_rows*num_rows; i=i+4){ if((row%4 == 0) && row < num_rows){ sum0 += B[row] * x[row]; sum1 += B[row+num_rows] * x[row]; sum0 += B[row] * x[row + 1]; sum1 += B[row+num_rows] * x[row+1]; } sum0= atomic(sum0); sum1= atomic(sum1); if(row == 0) sum = sum0 + sum1; int block_total = BlockReduce(temp_storage).Sum(local_total); if(threadIdx.x == 0) atomicAdd(total, block_total); int row = blockIdx.x * blockDim.x + threadIdx.x; int bm = 2*row; //if(((bm % 2) == 0) && (bm < num_rows)) { if(bm < num_rows) { // for(int i = 0; i < 2; i ++){ // ValueType sum = y[bm+i]; ValueType sum = y[bm]; colidx[nnz/2]; b_Ap[bm] = Ap[bm] / 2; int row_begin_1 = Ap[bm]; int row_end_1 = Ap[bm+1]; for (int offset = row_begin_1; offset < row_end_1; offset ++){ colidx[offset] = Aj[offset]/2; if(offset > 0 && colidx[offset]!=colidx[offset-1]) block_size++; } int row_begin_2 = Ap[bm+1]; int row_end_2 = Ap[bm+2]; for (int offset = row_begin_2; offset < row_end_2; offset ++){ colidx[offset] = Aj[offset]/2; if(offset > 0 && (colidx[offset]!=colidx[offset-1]) &&) block_size++; } b_Ax[offset] = 0; b_Ax[offset] = Ax[offset]; //row_begin = min(row_begin_1, row_begin_2)/2; //row_end = max(row_end_1, row_end_2)/2; for (int bm_offset = row_begin_1; bm_offset < row_end_2; bm_offset ++){ for (int block = 0; block < num_rows; block ++){ //block spmv //for(int offset = bm_offset; offset < bm_offset + 4; offset ++){ for(int i = 0; i < 4; i ++) bitmask[i]=0; if(Aj[bm_offset]%2==0 && row%2==0)bitmask[0]=1; else if(Aj[bm_offset]%2==1 && row%2==0)bitmask[1]=1; else if(Aj[bm_offset]%2==0 && row%2==1)bitmask[2]=1; else bitmask[3]=1; sum0 
+= Ax[bm_offset]*bitmask[0] * x[2*colidx[bm_offset]]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)]*bitmask[1] * x[2*colidx[bm_offset]]; sum0 += Ax[bm_offset]*bitmask[2] * x[2*colidx[bm_offset] + 1]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)]*bitmask[3] * x[2*colidx[bm_offset] + 1]; } // y[bm+i] = sum; y[bm] = sum; // } } int bm = blockIdx.x * blockDim.x + threadIdx.x; //if(((bm % 2) == 0) && (bm < num_rows)) { if(bm < num_rows) { // for(int i = 0; i < 2; i ++){ // ValueType sum = y[bm+i]; ValueType sum = y[bm]; b_Ap[bm] = Ap[bm] / 2; int row_begin = Ap[bm]; int row_end = Ap[bm+1]; bm_row_begin = row_begin / 2; bm_row_end = row_end / 2; for (int bm_offset = bm_row_begin; bm_offset < bm_row_end; bm_offset ++){ //block spmv //for(int offset = bm_offset; offset < bm_offset + 4; offset ++){ sum0 += Ax[bm_offset] * x[Aj[offset]]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)] * x[Aj[offset]]; sum0 += Ax[bm_offset] * x[Aj[offset] + 1]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)] * x[Aj[offset] + 1]; } // y[bm+i] = sum; y[bm] = sum; // } } } */ void SpmvSolver(int num_rows, int nnz, int *h_Ap, int *h_Aj, ValueType *h_Ax, ValueType *h_x, ValueType *h_y, ValueType *h_value, int *h_block, int *h_row_start, int &num_block_all) { //print_device_info(0); int *d_Ap, *d_Aj; CUDA_SAFE_CALL(hipMalloc((void **)&d_Ap, (num_rows + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_Aj, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_Ap, h_Ap, (num_rows + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_Aj, h_Aj, nnz * sizeof(int), hipMemcpyHostToDevice)); ValueType *d_Ax, *d_x, *d_y; CUDA_SAFE_CALL(hipMalloc((void **)&d_Ax, sizeof(ValueType) * nnz)); CUDA_SAFE_CALL(hipMalloc((void **)&d_x, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(hipMalloc((void **)&d_y, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(hipMemcpy(d_Ax, h_Ax, nnz * sizeof(ValueType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_x, h_x, num_rows * sizeof(ValueType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_y, h_y, num_rows * sizeof(ValueType), hipMemcpyHostToDevice)); float *d_value; int *d_block, *d_row_start; CUDA_SAFE_CALL(hipMalloc((void **)&d_value, num_block_all * 4 * sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void **)&d_block, num_block_all * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_row_start, (num_rows / 2 + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_value, h_value, num_block_all * 4 * sizeof(float), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_block, h_block, num_block_all * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_row_start, h_row_start, (num_rows / 2 + 1) * sizeof(int), hipMemcpyHostToDevice)); /*for(int j = 0; j < 20; j ++){ printf("h_value: %f \n", h_value[j]); } for(int j = 0; j < 20; j ++){ printf("h_Ax: %f \n", h_Ax[j]); } for(int j = 0; j < 8; j ++){ printf("h_x: %f \n", h_x[j]); } for(int j = 0; j < 8; j ++){ printf("h_y: %f \n", h_y[j]); }*/ int nthreads = BLOCK_SIZE; int nblocks = (num_rows - 1) / nthreads + 1; printf("Launching CUDA SpMV solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads); Timer t; t.Start(); hipLaunchKernelGGL(( spmv_csr_scalar_kernel) , dim3(nblocks), dim3(nthreads), 0, 0, num_rows, d_Ap, d_Aj, d_Ax, d_x, d_y, d_value, d_block, d_row_start); CudaTest("solving failed"); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("\truntime [%s] = %f ms.\n", SPMV_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(hipMemcpy(h_y, d_y, sizeof(ValueType) * num_rows, hipMemcpyDeviceToHost)); 
CUDA_SAFE_CALL(hipFree(d_Ap)); CUDA_SAFE_CALL(hipFree(d_Aj)); CUDA_SAFE_CALL(hipFree(d_Ax)); CUDA_SAFE_CALL(hipFree(d_x)); CUDA_SAFE_CALL(hipFree(d_y)); CUDA_SAFE_CALL(hipFree(d_value)); CUDA_SAFE_CALL(hipFree(d_block)); CUDA_SAFE_CALL(hipFree(d_row_start)); }
29e35ba2b16643fba9ff94231c68d71ff4f272e6.cu
// Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <[email protected]> #include <stdio.h> #define SPMV_VARIANT "scalar" #include "spmv.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "timer.h" //////////////////////////////////////////////////////////////////////// // CSR SpMV kernels based on a scalar model (one thread per row) /////////////////////////////////////////////////////////////////////// // // spmv_csr_scalar_device // Straightforward translation of standard CSR SpMV to CUDA // where each thread computes y[i] += A[i,:] * x // (the dot product of the i-th row of A with the x vector) // // spmv_csr_scalar_tex_device // Same as spmv_csr_scalar_device, except x is accessed via texture cache. // __global__ void spmv_csr_scalar_kernel(const int num_rows, const int * Ap, const int * Aj, const ValueType * Ax, const ValueType * x, ValueType * y, float * value, int * block, int * row_start) { int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < num_rows / 2) { float d0 = y[2*row]; float d1 = y[2*row+1]; int row_begin = row_start[row]; int row_end = row_start[row+1]; for(int j = row_begin; j < row_end; j ++){ d0 += value[j * 4 + 0]*x[2*block[j]+0]; d0 += value[j * 4 + 1]*x[2*block[j]+1]; d1 += value[j * 4 + 2]*x[2*block[j]+0]; d1 += value[j * 4 + 3]*x[2*block[j]+1]; } y[2*row] = d0; y[2*row+1] = d1; } } /* __global__ void spmv_csr_scalar_kernel(const int num_rows, const int * Ap, const int * Aj, const ValueType * Ax, const ValueType * x, ValueType * y) { ; int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < num_rows) { for(int i = 0; i < num_rows; i ++) B[i*num_rows + row] = 0.0; } if(row < num_rows) { for(int j = 0; j < num_rows; j ++) for(int offset = Ap[row]; offset < Ap[row+1]; offset ++) B[j*num_rows + Aj[offset]] = Ax[offset]; } //for (int i = 0; i < num_rows*num_rows; i=i+4){ if((row%4 == 0) && row < num_rows){ sum0 += B[row] * x[row]; sum1 += B[row+num_rows] * x[row]; sum0 += B[row] * x[row + 1]; sum1 += B[row+num_rows] * x[row+1]; } sum0= atomic(sum0); sum1= atomic(sum1); if(row == 0) sum = sum0 + sum1; int block_total = BlockReduce(temp_storage).Sum(local_total); if(threadIdx.x == 0) atomicAdd(total, block_total); int row = blockIdx.x * blockDim.x + threadIdx.x; int bm = 2*row; //if(((bm % 2) == 0) && (bm < num_rows)) { if(bm < num_rows) { // for(int i = 0; i < 2; i ++){ // ValueType sum = y[bm+i]; ValueType sum = y[bm]; colidx[nnz/2]; b_Ap[bm] = Ap[bm] / 2; int row_begin_1 = Ap[bm]; int row_end_1 = Ap[bm+1]; for (int offset = row_begin_1; offset < row_end_1; offset ++){ colidx[offset] = Aj[offset]/2; if(offset > 0 && colidx[offset]!=colidx[offset-1]) block_size++; } int row_begin_2 = Ap[bm+1]; int row_end_2 = Ap[bm+2]; for (int offset = row_begin_2; offset < row_end_2; offset ++){ colidx[offset] = Aj[offset]/2; if(offset > 0 && (colidx[offset]!=colidx[offset-1]) &&) block_size++; } b_Ax[offset] = 0; b_Ax[offset] = Ax[offset]; //row_begin = min(row_begin_1, row_begin_2)/2; //row_end = max(row_end_1, row_end_2)/2; for (int bm_offset = row_begin_1; bm_offset < row_end_2; bm_offset ++){ for (int block = 0; block < num_rows; block ++){ //block spmv //for(int offset = bm_offset; offset < bm_offset + 4; offset ++){ for(int i = 0; i < 4; i ++) bitmask[i]=0; if(Aj[bm_offset]%2==0 && row%2==0)bitmask[0]=1; else if(Aj[bm_offset]%2==1 && row%2==0)bitmask[1]=1; else if(Aj[bm_offset]%2==0 && row%2==1)bitmask[2]=1; else bitmask[3]=1; sum0 += Ax[bm_offset]*bitmask[0] * x[2*colidx[bm_offset]]; sum1 += Ax[bm_offset+(bm_row_end - 
bm_row_begin)]*bitmask[1] * x[2*colidx[bm_offset]]; sum0 += Ax[bm_offset]*bitmask[2] * x[2*colidx[bm_offset] + 1]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)]*bitmask[3] * x[2*colidx[bm_offset] + 1]; } // y[bm+i] = sum; y[bm] = sum; // } } int bm = blockIdx.x * blockDim.x + threadIdx.x; //if(((bm % 2) == 0) && (bm < num_rows)) { if(bm < num_rows) { // for(int i = 0; i < 2; i ++){ // ValueType sum = y[bm+i]; ValueType sum = y[bm]; b_Ap[bm] = Ap[bm] / 2; int row_begin = Ap[bm]; int row_end = Ap[bm+1]; bm_row_begin = row_begin / 2; bm_row_end = row_end / 2; for (int bm_offset = bm_row_begin; bm_offset < bm_row_end; bm_offset ++){ //block spmv //for(int offset = bm_offset; offset < bm_offset + 4; offset ++){ sum0 += Ax[bm_offset] * x[Aj[offset]]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)] * x[Aj[offset]]; sum0 += Ax[bm_offset] * x[Aj[offset] + 1]; sum1 += Ax[bm_offset+(bm_row_end - bm_row_begin)] * x[Aj[offset] + 1]; } // y[bm+i] = sum; y[bm] = sum; // } } } */ void SpmvSolver(int num_rows, int nnz, int *h_Ap, int *h_Aj, ValueType *h_Ax, ValueType *h_x, ValueType *h_y, ValueType *h_value, int *h_block, int *h_row_start, int &num_block_all) { //print_device_info(0); int *d_Ap, *d_Aj; CUDA_SAFE_CALL(cudaMalloc((void **)&d_Ap, (num_rows + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_Aj, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_Ap, h_Ap, (num_rows + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_Aj, h_Aj, nnz * sizeof(int), cudaMemcpyHostToDevice)); ValueType *d_Ax, *d_x, *d_y; CUDA_SAFE_CALL(cudaMalloc((void **)&d_Ax, sizeof(ValueType) * nnz)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(cudaMemcpy(d_Ax, h_Ax, nnz * sizeof(ValueType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, num_rows * sizeof(ValueType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, num_rows * sizeof(ValueType), cudaMemcpyHostToDevice)); float *d_value; int *d_block, *d_row_start; CUDA_SAFE_CALL(cudaMalloc((void **)&d_value, num_block_all * 4 * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_block, num_block_all * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_start, (num_rows / 2 + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_value, h_value, num_block_all * 4 * sizeof(float), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_block, h_block, num_block_all * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_row_start, h_row_start, (num_rows / 2 + 1) * sizeof(int), cudaMemcpyHostToDevice)); /*for(int j = 0; j < 20; j ++){ printf("h_value: %f \n", h_value[j]); } for(int j = 0; j < 20; j ++){ printf("h_Ax: %f \n", h_Ax[j]); } for(int j = 0; j < 8; j ++){ printf("h_x: %f \n", h_x[j]); } for(int j = 0; j < 8; j ++){ printf("h_y: %f \n", h_y[j]); }*/ int nthreads = BLOCK_SIZE; int nblocks = (num_rows - 1) / nthreads + 1; printf("Launching CUDA SpMV solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads); Timer t; t.Start(); spmv_csr_scalar_kernel <<<nblocks, nthreads>>> (num_rows, d_Ap, d_Aj, d_Ax, d_x, d_y, d_value, d_block, d_row_start); CudaTest("solving failed"); CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); printf("\truntime [%s] = %f ms.\n", SPMV_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(cudaMemcpy(h_y, d_y, sizeof(ValueType) * num_rows, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(d_Ap)); CUDA_SAFE_CALL(cudaFree(d_Aj)); CUDA_SAFE_CALL(cudaFree(d_Ax)); 
CUDA_SAFE_CALL(cudaFree(d_x)); CUDA_SAFE_CALL(cudaFree(d_y)); CUDA_SAFE_CALL(cudaFree(d_value)); CUDA_SAFE_CALL(cudaFree(d_block)); CUDA_SAFE_CALL(cudaFree(d_row_start)); }
3cfa7ea69c85134bfccfa4c464c856ef47e66589.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <gpu_error.cuh> /* Thread and blocks are one-dimensional ___________ ___________ ___________ |0|1|2|3|4|5| |0|1|2|3|4|5| |0|1|2|3|4|5| Block 0, Block 1, Block 2, Block X: 6 Global index: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 */ __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Bounds check in case there are extra threads if (i < n) { y[i] = a * x[i] + y[i]; } } int main(void) { // Size of vectors for operation int N = 1 << 20; // Host arrays float *x, *y; // Device arrays float *d_x, *d_y; // Allocate host data x = (float *) malloc(N * sizeof(float)); y = (float *) malloc(N * sizeof(float)); // Allocate device data errchk( hipMalloc(&d_x, N * sizeof(float)) ); errchk( hipMalloc(&d_y, N * sizeof(float)) ); // Fill host array with dummy data for (int i = 0; i < N; ++i) { x[i] = 3.0f; y[i] = 4.0f; } // Copy data to device errchk( hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice) ); errchk( hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice) ); // Perform SAXPY kernel on elements, one thread per array index // 256 threads per block, and ensure that we round up // to the correct number of thread blocks hipLaunchKernelGGL(( saxpy), dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x, d_y); errchk( hipPeekAtLastError() ); errchk( hipDeviceSynchronize() ); // Copy results back to host errchk( hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost) ); // Compute max error float max_error = 0.0f; float expected = 10.0f; // 2 * 3 + 4 for (int i = 0; i < N; ++i) { max_error = max(max_error, fabs(y[i] - expected)); } printf("Maximum error: %.5f\n", max_error); // Free memory free(x); free(y); errchk( hipFree(d_x) ); errchk( hipFree(d_y) ); }
3cfa7ea69c85134bfccfa4c464c856ef47e66589.cu
#include <stdio.h> #include <gpu_error.cuh> /* Thread and blocks are one-dimensional ___________ ___________ ___________ |0|1|2|3|4|5| |0|1|2|3|4|5| |0|1|2|3|4|5| Block 0, Block 1, Block 2, Block X: 6 Global index: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 */ __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Bounds check in case there are extra threads if (i < n) { y[i] = a * x[i] + y[i]; } } int main(void) { // Size of vectors for operation int N = 1 << 20; // Host arrays float *x, *y; // Device arrays float *d_x, *d_y; // Allocate host data x = (float *) malloc(N * sizeof(float)); y = (float *) malloc(N * sizeof(float)); // Allocate device data errchk( cudaMalloc(&d_x, N * sizeof(float)) ); errchk( cudaMalloc(&d_y, N * sizeof(float)) ); // Fill host array with dummy data for (int i = 0; i < N; ++i) { x[i] = 3.0f; y[i] = 4.0f; } // Copy data to device errchk( cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice) ); errchk( cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice) ); // Perform SAXPY kernel on elements, one thread per array index // 256 threads per block, and ensure that we round up // to the correct number of thread blocks saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y); errchk( cudaPeekAtLastError() ); errchk( cudaDeviceSynchronize() ); // Copy results back to host errchk( cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost) ); // Compute max error float max_error = 0.0f; float expected = 10.0f; // 2 * 3 + 4 for (int i = 0; i < N; ++i) { max_error = max(max_error, fabs(y[i] - expected)); } printf("Maximum error: %.5f\n", max_error); // Free memory free(x); free(y); errchk( cudaFree(d_x) ); errchk( cudaFree(d_y) ); }
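The saxpy pair above is the shortest example in this table of the central rewrite hipify applies to kernel launches: the CUDA triple-chevron launch becomes a call to hipLaunchKernelGGL, whose first five arguments are the kernel, the grid dim3, the block dim3, the dynamic shared-memory size, and the stream. The sketch below is a minimal, self-contained restatement of that one change; apart from the launch lines quoted in the comments, it is illustrative code, not text taken from the dataset.

#include <hip/hip_runtime.h>

__global__ void saxpy(int n, float a, float *x, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

void launch_saxpy(int N, float *d_x, float *d_y) {
    // CUDA form, as it appears in the .cu file above:
    //   saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y);
    // hipified form, as it appears in the .hip file above; the two zeros are
    // the dynamic shared-memory bytes and the stream:
    hipLaunchKernelGGL(saxpy, dim3((N + 255) / 256), dim3(256), 0, 0,
                       N, 2.0f, d_x, d_y);
}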
458da0d57394d8c0a90f5ad5fbc1f6edb74ead69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include <THH/THHApply.cuh> #include "common.h" template <typename T> struct PReLUUpdateOutput { T* weight_; PReLUUpdateOutput(T* weight) : weight_(weight) {} __device__ __forceinline__ void operator()(T *out, T *in) { T x = *in; *out = (x > 0) ? x : weight_[0] * x; } }; template <typename T> __global__ void preluForward(T *output, const T *input, const T *weight, int n, int nElemsPerSample, int mapSize) { CUDA_KERNEL_LOOP(i, n) { int positionInSample = i % nElemsPerSample; int mapNumber = positionInSample / mapSize; output[i] = input[i] > 0 ? input[i] : input[i] * weight[mapNumber]; } } template <typename T> struct PReLUUpdateGradInput { T *weight_; PReLUUpdateGradInput(T *weight) : weight_(weight) {} __device__ __forceinline__ void operator()(T *gradInput, T *gradOutput, T *input) { *gradInput = *input > 0 ? *gradOutput : *gradOutput * *weight_; } }; template <typename T> __global__ void preluBackward( T *gradInput, const T *input, const T *weight, const T *gradOutput, int n, int nElemsPerSample, int mapSize) { CUDA_KERNEL_LOOP(i, n) { int positionInSample = i % nElemsPerSample; int mapNumber = positionInSample / mapSize; gradInput[i] = input[i] > 0 ? gradOutput[i] : gradOutput[i] * weight[mapNumber]; } } template <typename T> struct PReLUAccGradParametersShared { __device__ __forceinline__ void operator()(T *gradInput, T *input, T *gradOutput) { *gradInput = (*input) * (*gradOutput) * (*input <= 0); } }; template <typename T> struct PReLUAccGradParameters { T scale; PReLUAccGradParameters(T scale) : scale(scale) {} __device__ __forceinline__ void operator()(T *gradInput, T *input, T *gradOutput) { *gradInput = (*input) * (*gradOutput) * scale * (*input <= 0); } }; template <typename T> struct PReLUAccGradParameters1to1 { T scale; PReLUAccGradParameters1to1(T scale) : scale(scale) {} __device__ __forceinline__ void operator()(T *gradWeight, T *input, T *gradOutput) { *gradWeight += (*input) * (*gradOutput) * scale * (*input <= 0); } }; #include "generic/PReLU.cu" #include "THHGenerateFloatTypes.h"
458da0d57394d8c0a90f5ad5fbc1f6edb74ead69.cu
#include "THCUNN.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> #include "common.h" template <typename T> struct PReLUUpdateOutput { T* weight_; PReLUUpdateOutput(T* weight) : weight_(weight) {} __device__ __forceinline__ void operator()(T *out, T *in) { T x = *in; *out = (x > 0) ? x : weight_[0] * x; } }; template <typename T> __global__ void preluForward(T *output, const T *input, const T *weight, int n, int nElemsPerSample, int mapSize) { CUDA_KERNEL_LOOP(i, n) { int positionInSample = i % nElemsPerSample; int mapNumber = positionInSample / mapSize; output[i] = input[i] > 0 ? input[i] : input[i] * weight[mapNumber]; } } template <typename T> struct PReLUUpdateGradInput { T *weight_; PReLUUpdateGradInput(T *weight) : weight_(weight) {} __device__ __forceinline__ void operator()(T *gradInput, T *gradOutput, T *input) { *gradInput = *input > 0 ? *gradOutput : *gradOutput * *weight_; } }; template <typename T> __global__ void preluBackward( T *gradInput, const T *input, const T *weight, const T *gradOutput, int n, int nElemsPerSample, int mapSize) { CUDA_KERNEL_LOOP(i, n) { int positionInSample = i % nElemsPerSample; int mapNumber = positionInSample / mapSize; gradInput[i] = input[i] > 0 ? gradOutput[i] : gradOutput[i] * weight[mapNumber]; } } template <typename T> struct PReLUAccGradParametersShared { __device__ __forceinline__ void operator()(T *gradInput, T *input, T *gradOutput) { *gradInput = (*input) * (*gradOutput) * (*input <= 0); } }; template <typename T> struct PReLUAccGradParameters { T scale; PReLUAccGradParameters(T scale) : scale(scale) {} __device__ __forceinline__ void operator()(T *gradInput, T *input, T *gradOutput) { *gradInput = (*input) * (*gradOutput) * scale * (*input <= 0); } }; template <typename T> struct PReLUAccGradParameters1to1 { T scale; PReLUAccGradParameters1to1(T scale) : scale(scale) {} __device__ __forceinline__ void operator()(T *gradWeight, T *input, T *gradOutput) { *gradWeight += (*input) * (*gradOutput) * scale * (*input <= 0); } }; #include "generic/PReLU.cu" #include "THCGenerateFloatTypes.h"
24f28eda78dae71b831058bfd5f9c5a5444fc016.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cudaHeader.h> #include <stdio.h> #include <unistd.h> #include <string> #include <vector> #include <algorithm> __constant__ double dist[12] = { 0.140, // fuel 0.052, // cladding 0.275, // cold, borated water 0.134, // hot, borated water 0.154, // RPV 0.064, // Lower, radial reflector 0.066, // Upper reflector / top plate 0.055, // bottom plate 0.008, // bottom nozzle 0.015, // top nozzle 0.025, // top of fuel assemblies 0.153 // bottom of fuel assemblies }; #define NO_BINARY_SEARCH 0 #define RANDOM_CONC_NUC 0 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ double devRn(unsigned long * seed) { #if STRIP_RANDOM == 1 return 0.0; #else /* unsigned int m = 2147483647; unsigned int n1 = ( 16807u * (*seed) ) % m; (*seed) = n1; return (double) n1 / (double) m; */ double x = (double)seed * (double)31415.9262; x -= (double) (int) x; return x; #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ int devPickMat( unsigned long * seed) { double roll = devRn(seed); // makes a pick based on the distro double running = 0; for( int i = 0; i < 12; i++ ) { running += dist[i]; if( roll < running ) return i; } return 11; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_integrated_single(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { unsigned long seed = 10000*threadIdx.x + 10* blockIdx.x + threadIdx.x + 31415; int matIdx; int lin; double p_energy; double r[5]; double f; int k = threadIdx.x + blockIdx.x * blockDim.x; #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i++) { lin = linearize(matIdx, i, maxNumNucs); NuclideGridPoint * high; NuclideGridPoint * low; int nuc = mats[lin]; int ptr = energyGrid[idx].xs_idx[nuc]; low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; high = low + 1 ; double c = concs[lin]; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); r[0] += c * (high->total_xs - f * (high->total_xs - low->total_xs)); r[1] += c * (high->elastic_xs - f * (high->elastic_xs - low->elastic_xs)); r[2] += c * (high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs)); r[3] += c * (high->fission_xs - f * (high->fission_xs - low->fission_xs)); r[4] += c * (high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs)); } // For some sense 
of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll 5 for(int i=0; i<5; i++) { macro_xs_vector[i*blockDim.x + threadIdx.x] = r[i]; } #if(STRIP_RANDOM==1) if(k < nResults) { memcpy(&results[5*k], r, N_ELEMENTS*sizeof(double)); } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // devCalculateXS_d3 is the 'best performing' kernel on the large case. This kernel unwraps the nuclide lookup for two nuclides simultaneously with the // edge case handled inline (instead of having a completely separate code path for the final iteration as in the d2 version of this kernel). /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_d4(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { int matIdx; double p_energy; int k = threadIdx.x + blockIdx.x * blockDim.x; if(k > numLookups) { return; } #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else unsigned long seed = 10000*threadIdx.x + threadIdx.x + 10* blockIdx.x + 31415; p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif double r[5]; r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; #if NO_BINARY_SEARCH int idx = k % (n_isotopes * n_gridpoints); idx += (((unsigned long)k * (unsigned long)k) % (unsigned long) (n_isotopes * n_gridpoints)); idx = ((unsigned long)idx * (unsigned long) blockIdx.x * (unsigned long) threadIdx.x) % ((unsigned long) (n_isotopes * n_gridpoints)); #else int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); #endif for(int i=0; i< numNucs[matIdx]; i+=2) { double2 t[12]; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); NuclideGridPoint * low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; double2 * base = (double2*) low; int nucB; IDX_TYPE ptrB; double2 * baseB; double cB; if( i < numNucs[matIdx]-1 ) { nucB = __LDG(&mats[linearize(matIdx, i+1, maxNumNucs)]); cB = __LDG(&concs[linearize(matIdx, i+1, maxNumNucs)]); ptrB = __LDG(&energyGrid[idx].xs_idx[nucB]); baseB = (double2*)&nuclideGrid[linearize(nucB, ptrB, n_gridpoints)]; } #pragma unroll 6 for(int s=0; s<6; s++) { t[s] = __LDG(&base[s]); } if( i < numNucs[matIdx]-1 ) { #pragma unroll 6 for(int s=0; s<6; s++) { t[s+6] = __LDG(&baseB[s]); } } { double f = (t[3].x - p_energy) / (t[3].x - t[0].x); double fB; r[0] += c*(t[3].y - f* (t[3].y - t[0].y)); if( i < numNucs[matIdx]-1 ) { fB = (t[9].x - p_energy) / (t[9].x - t[6].x); r[0] += cB*(t[9].y - fB* (t[9].y - t[6].y)); } #pragma unroll for(int s=0; s<2; s++) { r[2*s+1] += c*(t[s+4].x - f* (t[s+4].x - t[s+1].x)); r[2*s+2] += c*(t[s+4].y - f* (t[s+4].y - t[s+1].y)); if( i < numNucs[matIdx]-1 ) { r[2*s+1] += cB*(t[s+10].x - fB* (t[s+10].x - t[s+7].x)); r[2*s+2] += cB*(t[s+10].y - fB* (t[s+10].y - t[s+7].y)); } } } } // For some sense of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll for(int i=0; i<5; i++) { macro_xs_vector[i* blockDim.x + threadIdx.x] = r[i]; } 
#if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r[0]; results[5*k+1] = r[1]; results[5*k+2] = r[2]; results[5*k+3] = r[3]; results[5*k+4] = r[4]; memcpy(&results[5*k], r, N_ELEMENTS*sizeof(double)); } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // devCalculateXS_d3 is the 'best performing' kernel on the large case. This kernel unwraps the nuclide lookup for two nuclides simultaneously with the // edge case handled inline (instead of having a completely separate code path for the final iteration as in the d2 version of this kernel). /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_d3(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { unsigned long seed = 10000*threadIdx.x + threadIdx.x + 10* blockIdx.x + 31415; int matIdx; double p_energy; int workPerBlock = 1 + ( numLookups / (gridDim.x) ); int lowerLimit = blockIdx.x * workPerBlock + threadIdx.x; int upperLimit = lowerLimit + workPerBlock; for(int k=lowerLimit; k< upperLimit; k+= blockDim.x) { #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif double r[5]; r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i+=2) { double2 t[12]; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); NuclideGridPoint * low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; double2 * base = (double2*) low; int nucB; IDX_TYPE ptrB; double2 * baseB; double cB; if( i < numNucs[matIdx]-1 ) { nucB = __LDG(&mats[linearize(matIdx, i+1, maxNumNucs)]); cB = __LDG(&concs[linearize(matIdx, i+1, maxNumNucs)]); ptrB = __LDG(&energyGrid[idx].xs_idx[nucB]); baseB = (double2*)&nuclideGrid[linearize(nucB, ptrB, n_gridpoints)]; } #pragma unroll 6 for(int s=0; s<6; s++) { t[s] = __LDG(&base[s]); } if( i < numNucs[matIdx]-1 ) { #pragma unroll 6 for(int s=0; s<6; s++) { t[s+6] = __LDG(&baseB[s]); } } { double f = (t[3].x - p_energy) / (t[3].x - t[0].x); double fB; r[0] += c*(t[3].y - f* (t[3].y - t[0].y)); if( i < numNucs[matIdx]-1 ) { fB = (t[9].x - p_energy) / (t[9].x - t[6].x); r[0] += cB*(t[9].y - fB* (t[9].y - t[6].y)); } #pragma unroll for(int s=0; s<2; s++) { r[2*s+1] += c*(t[s+4].x - f* (t[s+4].x - t[s+1].x)); r[2*s+2] += c*(t[s+4].y - f* (t[s+4].y - t[s+1].y)); if( i < numNucs[matIdx]-1 ) { r[2*s+1] += cB*(t[s+10].x - fB* (t[s+10].x - t[s+7].x)); r[2*s+2] += cB*(t[s+10].y - fB* (t[s+10].y - t[s+7].y)); } } } } // For some sense of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll for(int i=0; i<5; i++) { macro_xs_vector[i* blockDim.x + threadIdx.x] = (double) idx;// r[i]; } #if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r[0]; results[5*k+1] 
= r[1]; results[5*k+2] = r[2]; results[5*k+3] = r[3]; results[5*k+4] = r[4]; memcpy(&results[5*k], r, N_ELEMENTS*sizeof(double)); } #endif } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_opt_single(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { int id = threadIdx.x + blockDim.x * blockIdx.x; int matIdx; double p_energy; double r0, r1, r2, r3, r4; #if(STRIP_RANDOM==1) int k = threadIdx.x + blockDim.x * blockIdx.x; p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else unsigned long seed = 10000*threadIdx.x + 10* blockIdx.x + threadIdx.x + 31415; p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif r0 = r1 = r2 = r3 = r4 = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i++) { double2 t0, t1, t2, t3, t4, t5; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); double2 * base = (double2*) &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; t0 = __LDG(base); t1 = __LDG(&base[1]); t2 = __LDG(&base[2]); t3 = __LDG(&base[3]); t4 = __LDG(&base[4]); t5 = __LDG(&base[5]); double f = (t3.x - p_energy) / (t3.x - t0.x); r0 += c*(t3.y - f * (t3.y - t0.y)); r1 += c*(t4.x - f * (t4.x - t1.x)); r2 += c*(t4.y - f * (t4.y - t1.y)); r3 += c*(t5.x - f * (t5.x - t2.x)); r4 += c*(t5.y - f * (t5.y - t2.y)); } // For some sense of legitimacy, write back the outputs, even though we will overwrite o macro_xs_vector[threadIdx.x] = r0; macro_xs_vector[blockDim.x + threadIdx.x] = r1; macro_xs_vector[2*blockDim.x + threadIdx.x] = r2; macro_xs_vector[3*blockDim.x + threadIdx.x] = r3; macro_xs_vector[4*blockDim.x + threadIdx.x] = r4; #if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r0; results[5*k+1] = r1; results[5*k+2] = r2; results[5*k+3] = r3; results[5*k+4] = r4; } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE int checkResults(int numResults, int numElements, double * referenceResults, double * comparisonResults) { bool correct = true; for(int i=0; i<numResults*numElements; i++) { if(fabs(referenceResults[i] - comparisonResults[i]) > 0.000001) { printf("Answers do not match at lookup %d element %d (%.10f, %.10f) \n", i/numElements, i%numElements, referenceResults[i], comparisonResults[i]); correct = false; break; } } if(correct) { return 1; } else { return 0; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE int checkGPUResults(int numResults, int numElements, double * referenceResults, double * devGPUResults) { double * lResults; bool correct; 
CUDA_CALL(hipHostMalloc(&lResults, numResults * numElements*sizeof(double))); CUDA_CALL(hipMemcpy(lResults, devGPUResults, numResults * numElements *sizeof(double), hipMemcpyDeviceToHost)); correct = checkResults(numResults, numElements, referenceResults, lResults); CUDA_CALL(hipHostFree(lResults)); if(correct) { return 1; } else { return 0; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// double runExperiment( void (*kernel)(int, int, int, GridPoint_Index*, int*, int, int, NuclideGridPoint *, int *, double*, double *, double*, int), std::string kernelName, int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * searchGrid, double * results, int nResults, double * cpuResults, int blockLimiter, int lookupsPerThread=1, int dshared=0) { int numBlocks; int threadsPerBlock; double performance = -1.0E12; hipEvent_t start, stop; //CUDA_CALL(hipEventCreate(&start)); //CUDA_CALL(hipEventCreate(&stop)); double maxPerf = -1.0; #if(PROFILE_MODE) threadsPerBlock = THREADS_PER_BLOCK; CUDA_CALL(hipMemset(results, 0, 5*NUM_RESULTS*sizeof(double))); //CUDA_CALL(hipEventRecord(start)); numBlocks = 1 + numLookups/(threadsPerBlock*lookupsPerThread); hipLaunchKernelGGL(( kernel), dim3(numBlocks), dim3(threadsPerBlock), dshared, 0, numLookups, n_isotopes, n_gridpoints, energyGrid, numNucs, numMats, maxNumNucs, nuclideGrid, mats, concs, macro_xs_vector, results, nResults); //CUDA_CHECK(); //CUDA_CALL(hipEventRecord(stop)); //CUDA_CALL(hipEventSynchronize(stop)); CUDA_CALL(hipDeviceSynchronize()); float gpuTime = 0.0; //CUDA_CALL(hipEventElapsedTime(&gpuTime, start, stop)); gpuTime = 1.0; performance = 1000.0 * ( (double) numLookups/ (double) gpuTime); printf("%s <<<%d, %d>>> Lookups/s: %.0lf in %f ms\n", kernelName.c_str(), numBlocks, threadsPerBlock, performance, gpuTime); maxPerf = performance; #else for(threadsPerBlock=32; threadsPerBlock<=blockLimiter; threadsPerBlock+=32) { CUDA_CALL(hipMemset(results, 0, 5*NUM_RESULTS*sizeof(double))); //CUDA_CALL(hipEventRecord(start)); numBlocks = 1 + numLookups/(threadsPerBlock*lookupsPerThread); hipLaunchKernelGGL(( kernel), dim3(numBlocks), dim3(threadsPerBlock), dshared, 0, numLookups, n_isotopes, n_gridpoints, energyGrid, numNucs, numMats, maxNumNucs, nuclideGrid, mats, concs, macro_xs_vector, results, nResults); //CUDA_CHECK(); //CUDA_CALL(hipEventRecord(stop)); //CUDA_CALL(hipEventSynchronize(stop)); CUDA_CALL(hipDeviceSynchronize()); float gpuTime = 0.0; //CUDA_CALL(hipEventElapsedTime(&gpuTime, start, stop)); gpuTime = 1.0; performance = 1000.0 * ( (double) numLookups/ (double) gpuTime); printf("%s <<<%d, %d>>> Lookups/s: %.0lf in %f ms\n", kernelName.c_str(), numBlocks, threadsPerBlock, performance, gpuTime); #if STRIP_RANDOM == 1 if(checkGPUResults(nResults, N_ELEMENTS, cpuResults, results)) { printf("%s results match reference CPU Results\n", kernelName.c_str()); } else{ printf("%s Results are INCORRECT\n", kernelName.c_str()); performance = -1.0; } #endif if(performance > maxPerf) { maxPerf = performance; } } #endif return maxPerf; } 
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE double cudaDriver(int numLookups, int n_isotopes, int n_gridpoints, int numMats, int * numNucs, GridPoint * energyGrid, double ** concs, NuclideGridPoint ** nuclideGrid, int ** mats, double * cpuResults, int kernelId) { printf("################################################################################\n"); printf(" GPU SIMULATION\n"); printf("################################################################################\n"); int * devNumNucs = NULL; double * devConcs = NULL; int * devMats = NULL; GridPoint_Index * devEnergyGrid = NULL; NuclideGridPoint * devNuclideGrid = NULL; double * devEnergySearchGrid = NULL; double * devMacroXSVector = NULL; double * devResults = NULL; IDX_TYPE * devIndexArray; hipEvent_t start, stop; //CUDA_CALL(hipEventCreate(&start)); //CUDA_CALL(hipEventCreate(&stop)); int threadsPerBlock = THREADS_PER_BLOCK; long numBlocks = 1024; int maxNumNucs=-1; for(int i=0; i<numMats; i++) { maxNumNucs = max(maxNumNucs, numNucs[i]); } printf("MaxNumNucs is %d\n", maxNumNucs); printf(" numIsotopes: %d, n_gridpoints: %d, n_mats: %d\n", n_isotopes, n_gridpoints, numMats); // Hoisted for doing annotaion of hot structure CUDA_CALL(hipMalloc(&devIndexArray, n_isotopes * n_isotopes * n_gridpoints * sizeof(IDX_TYPE))); CUDA_CALL(hipMalloc(&devMacroXSVector, 5 * numBlocks * threadsPerBlock * sizeof(double))); CUDA_CALL(hipMalloc(&devNumNucs, numMats * sizeof(int))); CUDA_CALL(hipMemcpy(devNumNucs, numNucs, numMats * sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&devNuclideGrid, n_isotopes * n_gridpoints * sizeof(NuclideGridPoint))); CUDA_CALL(hipMemcpy(devNuclideGrid, nuclideGrid[0], n_isotopes * n_gridpoints * sizeof(NuclideGridPoint), hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&devResults, 5*NUM_RESULTS*sizeof(double))); // Deal with concs and Mats. Notice, when copying these to the GPU we flatten out the structure into a dense matrix { long totalElements = numMats * maxNumNucs; CUDA_CALL(hipMalloc(&devMats, totalElements * sizeof(int))); CUDA_CALL(hipMalloc(&devConcs, totalElements * sizeof(double))); CUDA_CALL(hipMemset(devMats, 0, totalElements * sizeof(int))); CUDA_CALL(hipMemset(devConcs, 0, totalElements * sizeof(double))); for(int i=0; i<numMats; i++) { CUDA_CALL(hipMemcpy(&devMats[i*maxNumNucs], mats[i], numNucs[i]*sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(&devConcs[i*maxNumNucs], concs[i], numNucs[i]*sizeof(double), hipMemcpyHostToDevice)); } } // Deal with Energy Grid. 
{ GridPoint_Index * lEnergyGrid; IDX_TYPE * lIndexArray; IDX_TYPE * rowptr; CUDA_CALL(hipMalloc(&devEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index))); CUDA_CALL(hipHostMalloc(&lEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index))); printf("Allocation is %f mb\n", ((long)n_isotopes * (long)n_isotopes * (long)n_gridpoints * sizeof(IDX_TYPE))/(double) 1E6); CUDA_CALL(hipHostMalloc(&lIndexArray, n_gridpoints *n_isotopes* n_isotopes * sizeof(IDX_TYPE))); memset(lIndexArray, 0, n_gridpoints * n_isotopes * n_isotopes * sizeof(IDX_TYPE)); for(int i=0; i<n_isotopes * n_gridpoints; i++) { lEnergyGrid[i].energy = energyGrid[i].energy; lEnergyGrid[i].xs_idx = &devIndexArray[i*n_isotopes]; rowptr = &lIndexArray[i*n_isotopes]; switch(sizeof(IDX_TYPE)) { case 4: memcpy(rowptr, energyGrid[i].xs_ptrs, n_isotopes*sizeof(int)); break; case 2: for(int s=0; s<n_isotopes; s++) { rowptr[s] = (IDX_TYPE)energyGrid[i].xs_ptrs[s]; } break; default: printf("Error: sizeof(IDX_TYPE) is not supported\n"); break; } } CUDA_CALL(hipMemcpy(devIndexArray, lIndexArray, n_isotopes*n_isotopes*n_gridpoints*sizeof(IDX_TYPE), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(devEnergyGrid, lEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index), hipMemcpyHostToDevice)); CUDA_CALL(hipHostFree(lIndexArray)); CUDA_CALL(hipHostFree(lEnergyGrid)); } // Run Experiments std::vector<double> perf; int dshared = 0; switch(kernelId) { case 0: perf.push_back(runExperiment(devCalculateXS_d4, "LDG OPT Double Revised singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 1: perf.push_back(runExperiment(devCalculateXS_opt_single, "LDG OPT singler", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 2: perf.push_back(runExperiment(devCalculateXS_integrated_single, "Basic Kernel singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 3: perf.push_back(runExperiment(devCalculateXS_d3, "LDG OPT Double (original) singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); default: printf("Error: unrecognized kernel id\n"); break; } CUDA_CALL(hipFree(devIndexArray)); CUDA_CALL(hipFree(devNumNucs)); CUDA_CALL(hipFree(devConcs)); CUDA_CALL(hipFree(devMats)); if(devEnergyGrid != NULL) { CUDA_CALL(hipFree(devEnergyGrid)); } CUDA_CALL(hipFree(devNuclideGrid)); CUDA_CALL(hipFree(devMacroXSVector)); std::vector<double>::iterator r = std::max_element(perf.begin(), perf.end()); printf("Max Perf is %f\n", (*r)); return (*r); }
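The kernels above call devBasicBinarySearch_index, which is declared in cudaHeader.h and not shown in this file. As an illustration only, a minimal sketch of such a device-side lower-bound search might look like the following; the actual implementation and tie-breaking behaviour in cudaHeader.h may differ, and the only assumption made here (matching the kernels' usage) is that the grid's energy field is sorted in ascending order.

// Illustrative sketch only -- the real devBasicBinarySearch_index lives in cudaHeader.h.
// Returns the index of the last element whose energy is <= value, assuming grid[0..n-1].energy
// is sorted ascending.
template <class T>
__device__ int devBasicBinarySearch_index(T * grid, double value, int n)
{
    int lo = 0;
    int hi = n - 1;
    while (lo < hi)
    {
        int mid = lo + (hi - lo + 1) / 2;   // bias upward so the loop terminates
        if (grid[mid].energy <= value)
            lo = mid;
        else
            hi = mid - 1;
    }
    return lo;
}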
24f28eda78dae71b831058bfd5f9c5a5444fc016.cu
#include <cudaHeader.h> #include <stdio.h> #include <unistd.h> #include <string> #include <vector> #include <algorithm> __constant__ double dist[12] = { 0.140, // fuel 0.052, // cladding 0.275, // cold, borated water 0.134, // hot, borated water 0.154, // RPV 0.064, // Lower, radial reflector 0.066, // Upper reflector / top plate 0.055, // bottom plate 0.008, // bottom nozzle 0.015, // top nozzle 0.025, // top of fuel assemblies 0.153 // bottom of fuel assemblies }; #define NO_BINARY_SEARCH 0 #define RANDOM_CONC_NUC 0 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ double devRn(unsigned long * seed) { #if STRIP_RANDOM == 1 return 0.0; #else /* unsigned int m = 2147483647; unsigned int n1 = ( 16807u * (*seed) ) % m; (*seed) = n1; return (double) n1 / (double) m; */ double x = (double)seed * (double)31415.9262; x -= (double) (int) x; return x; #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __device__ int devPickMat( unsigned long * seed) { double roll = devRn(seed); // makes a pick based on the distro double running = 0; for( int i = 0; i < 12; i++ ) { running += dist[i]; if( roll < running ) return i; } return 11; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_integrated_single(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { unsigned long seed = 10000*threadIdx.x + 10* blockIdx.x + threadIdx.x + 31415; int matIdx; int lin; double p_energy; double r[5]; double f; int k = threadIdx.x + blockIdx.x * blockDim.x; #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i++) { lin = linearize(matIdx, i, maxNumNucs); NuclideGridPoint * high; NuclideGridPoint * low; int nuc = mats[lin]; int ptr = energyGrid[idx].xs_idx[nuc]; low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; high = low + 1 ; double c = concs[lin]; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); r[0] += c * (high->total_xs - f * (high->total_xs - low->total_xs)); r[1] += c * (high->elastic_xs - f * (high->elastic_xs - low->elastic_xs)); r[2] += c * (high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs)); r[3] += c * (high->fission_xs - f * (high->fission_xs - low->fission_xs)); r[4] += c * (high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs)); } // For some sense of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll 5 
for(int i=0; i<5; i++) { macro_xs_vector[i*blockDim.x + threadIdx.x] = r[i]; } #if(STRIP_RANDOM==1) if(k < nResults) { memcpy(&results[5*k], r, N_ELEMENTS*sizeof(double)); } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // devCalculateXS_d3 is the 'best performing' kernel on the large case. This kernel unwraps the nuclide lookup for two nuclides simultaneously with the // edge case handled inline (instead of having a completely separate code path for the final iteration as in the d2 version of this kernel). /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_d4(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { int matIdx; double p_energy; int k = threadIdx.x + blockIdx.x * blockDim.x; if(k > numLookups) { return; } #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else unsigned long seed = 10000*threadIdx.x + threadIdx.x + 10* blockIdx.x + 31415; p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif double r[5]; r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; #if NO_BINARY_SEARCH int idx = k % (n_isotopes * n_gridpoints); idx += (((unsigned long)k * (unsigned long)k) % (unsigned long) (n_isotopes * n_gridpoints)); idx = ((unsigned long)idx * (unsigned long) blockIdx.x * (unsigned long) threadIdx.x) % ((unsigned long) (n_isotopes * n_gridpoints)); #else int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); #endif for(int i=0; i< numNucs[matIdx]; i+=2) { double2 t[12]; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); NuclideGridPoint * low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; double2 * base = (double2*) low; int nucB; IDX_TYPE ptrB; double2 * baseB; double cB; if( i < numNucs[matIdx]-1 ) { nucB = __LDG(&mats[linearize(matIdx, i+1, maxNumNucs)]); cB = __LDG(&concs[linearize(matIdx, i+1, maxNumNucs)]); ptrB = __LDG(&energyGrid[idx].xs_idx[nucB]); baseB = (double2*)&nuclideGrid[linearize(nucB, ptrB, n_gridpoints)]; } #pragma unroll 6 for(int s=0; s<6; s++) { t[s] = __LDG(&base[s]); } if( i < numNucs[matIdx]-1 ) { #pragma unroll 6 for(int s=0; s<6; s++) { t[s+6] = __LDG(&baseB[s]); } } { double f = (t[3].x - p_energy) / (t[3].x - t[0].x); double fB; r[0] += c*(t[3].y - f* (t[3].y - t[0].y)); if( i < numNucs[matIdx]-1 ) { fB = (t[9].x - p_energy) / (t[9].x - t[6].x); r[0] += cB*(t[9].y - fB* (t[9].y - t[6].y)); } #pragma unroll for(int s=0; s<2; s++) { r[2*s+1] += c*(t[s+4].x - f* (t[s+4].x - t[s+1].x)); r[2*s+2] += c*(t[s+4].y - f* (t[s+4].y - t[s+1].y)); if( i < numNucs[matIdx]-1 ) { r[2*s+1] += cB*(t[s+10].x - fB* (t[s+10].x - t[s+7].x)); r[2*s+2] += cB*(t[s+10].y - fB* (t[s+10].y - t[s+7].y)); } } } } // For some sense of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll for(int i=0; i<5; i++) { macro_xs_vector[i* blockDim.x + threadIdx.x] = r[i]; } #if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r[0]; results[5*k+1] = r[1]; results[5*k+2] = r[2]; 
results[5*k+3] = r[3]; results[5*k+4] = r[4]; memcpy(&results[5*k], r, N_ELEMENTS*sizeof(double)); } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // devCalculateXS_d3 is the 'best performing' kernel on the large case. This kernel unwraps the nuclide lookup for two nuclides simultaneously with the // edge case handled inline (instead of having a completely separate code path for the final iteration as in the d2 version of this kernel). /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_d3(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { unsigned long seed = 10000*threadIdx.x + threadIdx.x + 10* blockIdx.x + 31415; int matIdx; double p_energy; int workPerBlock = 1 + ( numLookups / (gridDim.x) ); int lowerLimit = blockIdx.x * workPerBlock + threadIdx.x; int upperLimit = lowerLimit + workPerBlock; for(int k=lowerLimit; k< upperLimit; k+= blockDim.x) { #if(STRIP_RANDOM==1) p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif double r[5]; r[0] = r[1] = r[2] = r[3]= r[4] = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i+=2) { double2 t[12]; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); NuclideGridPoint * low = &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; double2 * base = (double2*) low; int nucB; IDX_TYPE ptrB; double2 * baseB; double cB; if( i < numNucs[matIdx]-1 ) { nucB = __LDG(&mats[linearize(matIdx, i+1, maxNumNucs)]); cB = __LDG(&concs[linearize(matIdx, i+1, maxNumNucs)]); ptrB = __LDG(&energyGrid[idx].xs_idx[nucB]); baseB = (double2*)&nuclideGrid[linearize(nucB, ptrB, n_gridpoints)]; } #pragma unroll 6 for(int s=0; s<6; s++) { t[s] = __LDG(&base[s]); } if( i < numNucs[matIdx]-1 ) { #pragma unroll 6 for(int s=0; s<6; s++) { t[s+6] = __LDG(&baseB[s]); } } { double f = (t[3].x - p_energy) / (t[3].x - t[0].x); double fB; r[0] += c*(t[3].y - f* (t[3].y - t[0].y)); if( i < numNucs[matIdx]-1 ) { fB = (t[9].x - p_energy) / (t[9].x - t[6].x); r[0] += cB*(t[9].y - fB* (t[9].y - t[6].y)); } #pragma unroll for(int s=0; s<2; s++) { r[2*s+1] += c*(t[s+4].x - f* (t[s+4].x - t[s+1].x)); r[2*s+2] += c*(t[s+4].y - f* (t[s+4].y - t[s+1].y)); if( i < numNucs[matIdx]-1 ) { r[2*s+1] += cB*(t[s+10].x - fB* (t[s+10].x - t[s+7].x)); r[2*s+2] += cB*(t[s+10].y - fB* (t[s+10].y - t[s+7].y)); } } } } // For some sense of legitimacy, write back the outputs, even though we will overwrite o #pragma unroll for(int i=0; i<5; i++) { macro_xs_vector[i* blockDim.x + threadIdx.x] = (double) idx;// r[i]; } #if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r[0]; results[5*k+1] = r[1]; results[5*k+2] = r[2]; results[5*k+3] = r[3]; results[5*k+4] = r[4]; memcpy(&results[5*k], r, 
N_ELEMENTS*sizeof(double)); } #endif } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void devCalculateXS_opt_single(int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * results, int nResults) { int id = threadIdx.x + blockDim.x * blockIdx.x; int matIdx; double p_energy; double r0, r1, r2, r3, r4; #if(STRIP_RANDOM==1) int k = threadIdx.x + blockDim.x * blockIdx.x; p_energy = 0.01 + (((double)(k%10))/10.0) + (((double)(k%1000))/1000.0); p_energy -= ((int)(p_energy)); matIdx = k%12; #else unsigned long seed = 10000*threadIdx.x + 10* blockIdx.x + threadIdx.x + 31415; p_energy = devRn(&seed); matIdx = devPickMat(&seed); #endif r0 = r1 = r2 = r3 = r4 = 0.0; int idx = devBasicBinarySearch_index<GridPoint_Index>(energyGrid, p_energy, n_isotopes*n_gridpoints); for(int i=0; i< numNucs[matIdx]; i++) { double2 t0, t1, t2, t3, t4, t5; int nuc = __LDG(&mats[linearize(matIdx, i, maxNumNucs)]); double c = __LDG(&concs[linearize(matIdx, i, maxNumNucs)]); IDX_TYPE ptr = __LDG(&energyGrid[idx].xs_idx[nuc]); double2 * base = (double2*) &nuclideGrid[linearize(nuc, ptr, n_gridpoints)]; t0 = __LDG(base); t1 = __LDG(&base[1]); t2 = __LDG(&base[2]); t3 = __LDG(&base[3]); t4 = __LDG(&base[4]); t5 = __LDG(&base[5]); double f = (t3.x - p_energy) / (t3.x - t0.x); r0 += c*(t3.y - f * (t3.y - t0.y)); r1 += c*(t4.x - f * (t4.x - t1.x)); r2 += c*(t4.y - f * (t4.y - t1.y)); r3 += c*(t5.x - f * (t5.x - t2.x)); r4 += c*(t5.y - f * (t5.y - t2.y)); } // For some sense of legitimacy, write back the outputs, even though we will overwrite o macro_xs_vector[threadIdx.x] = r0; macro_xs_vector[blockDim.x + threadIdx.x] = r1; macro_xs_vector[2*blockDim.x + threadIdx.x] = r2; macro_xs_vector[3*blockDim.x + threadIdx.x] = r3; macro_xs_vector[4*blockDim.x + threadIdx.x] = r4; #if(STRIP_RANDOM==1) if(k < nResults) { results[5*k] = r0; results[5*k+1] = r1; results[5*k+2] = r2; results[5*k+3] = r3; results[5*k+4] = r4; } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE int checkResults(int numResults, int numElements, double * referenceResults, double * comparisonResults) { bool correct = true; for(int i=0; i<numResults*numElements; i++) { if(fabs(referenceResults[i] - comparisonResults[i]) > 0.000001) { printf("Answers do not match at lookup %d element %d (%.10f, %.10f) \n", i/numElements, i%numElements, referenceResults[i], comparisonResults[i]); correct = false; break; } } if(correct) { return 1; } else { return 0; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE int checkGPUResults(int numResults, int numElements, double * referenceResults, double * devGPUResults) { double * lResults; bool correct; CUDA_CALL(cudaMallocHost(&lResults, numResults * numElements*sizeof(double))); 
CUDA_CALL(cudaMemcpy(lResults, devGPUResults, numResults * numElements *sizeof(double), cudaMemcpyDeviceToHost)); correct = checkResults(numResults, numElements, referenceResults, lResults); CUDA_CALL(cudaFreeHost(lResults)); if(correct) { return 1; } else { return 0; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// double runExperiment( void (*kernel)(int, int, int, GridPoint_Index*, int*, int, int, NuclideGridPoint *, int *, double*, double *, double*, int), std::string kernelName, int numLookups, int n_isotopes, int n_gridpoints, GridPoint_Index * energyGrid, int * numNucs, int numMats, int maxNumNucs, NuclideGridPoint * nuclideGrid, int * mats, double * concs, double * macro_xs_vector, double * searchGrid, double * results, int nResults, double * cpuResults, int blockLimiter, int lookupsPerThread=1, int dshared=0) { int numBlocks; int threadsPerBlock; double performance = -1.0E12; cudaEvent_t start, stop; //CUDA_CALL(cudaEventCreate(&start)); //CUDA_CALL(cudaEventCreate(&stop)); double maxPerf = -1.0; #if(PROFILE_MODE) threadsPerBlock = THREADS_PER_BLOCK; CUDA_CALL(cudaMemset(results, 0, 5*NUM_RESULTS*sizeof(double))); //CUDA_CALL(cudaEventRecord(start)); numBlocks = 1 + numLookups/(threadsPerBlock*lookupsPerThread); kernel<<<numBlocks, threadsPerBlock, dshared>>>(numLookups, n_isotopes, n_gridpoints, energyGrid, numNucs, numMats, maxNumNucs, nuclideGrid, mats, concs, macro_xs_vector, results, nResults); //CUDA_CHECK(); //CUDA_CALL(cudaEventRecord(stop)); //CUDA_CALL(cudaEventSynchronize(stop)); CUDA_CALL(cudaDeviceSynchronize()); float gpuTime = 0.0; //CUDA_CALL(cudaEventElapsedTime(&gpuTime, start, stop)); gpuTime = 1.0; performance = 1000.0 * ( (double) numLookups/ (double) gpuTime); printf("%s <<<%d, %d>>> Lookups/s: %.0lf in %f ms\n", kernelName.c_str(), numBlocks, threadsPerBlock, performance, gpuTime); maxPerf = performance; #else for(threadsPerBlock=32; threadsPerBlock<=blockLimiter; threadsPerBlock+=32) { CUDA_CALL(cudaMemset(results, 0, 5*NUM_RESULTS*sizeof(double))); //CUDA_CALL(cudaEventRecord(start)); numBlocks = 1 + numLookups/(threadsPerBlock*lookupsPerThread); kernel<<<numBlocks, threadsPerBlock, dshared>>>(numLookups, n_isotopes, n_gridpoints, energyGrid, numNucs, numMats, maxNumNucs, nuclideGrid, mats, concs, macro_xs_vector, results, nResults); //CUDA_CHECK(); //CUDA_CALL(cudaEventRecord(stop)); //CUDA_CALL(cudaEventSynchronize(stop)); CUDA_CALL(cudaDeviceSynchronize()); float gpuTime = 0.0; //CUDA_CALL(cudaEventElapsedTime(&gpuTime, start, stop)); gpuTime = 1.0; performance = 1000.0 * ( (double) numLookups/ (double) gpuTime); printf("%s <<<%d, %d>>> Lookups/s: %.0lf in %f ms\n", kernelName.c_str(), numBlocks, threadsPerBlock, performance, gpuTime); #if STRIP_RANDOM == 1 if(checkGPUResults(nResults, N_ELEMENTS, cpuResults, results)) { printf("%s results match reference CPU Results\n", kernelName.c_str()); } else{ printf("%s Results are INCORRECT\n", kernelName.c_str()); performance = -1.0; } #endif if(performance > maxPerf) { maxPerf = performance; } } #endif return maxPerf; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// C_LINKAGE double cudaDriver(int numLookups, int n_isotopes, int n_gridpoints, int numMats, int * numNucs, GridPoint * energyGrid, double ** concs, NuclideGridPoint ** nuclideGrid, int ** mats, double * cpuResults, int kernelId) { printf("################################################################################\n"); printf(" GPU SIMULATION\n"); printf("################################################################################\n"); int * devNumNucs = NULL; double * devConcs = NULL; int * devMats = NULL; GridPoint_Index * devEnergyGrid = NULL; NuclideGridPoint * devNuclideGrid = NULL; double * devEnergySearchGrid = NULL; double * devMacroXSVector = NULL; double * devResults = NULL; IDX_TYPE * devIndexArray; cudaEvent_t start, stop; //CUDA_CALL(cudaEventCreate(&start)); //CUDA_CALL(cudaEventCreate(&stop)); int threadsPerBlock = THREADS_PER_BLOCK; long numBlocks = 1024; int maxNumNucs=-1; for(int i=0; i<numMats; i++) { maxNumNucs = max(maxNumNucs, numNucs[i]); } printf("MaxNumNucs is %d\n", maxNumNucs); printf(" numIsotopes: %d, n_gridpoints: %d, n_mats: %d\n", n_isotopes, n_gridpoints, numMats); // Hoisted for doing annotaion of hot structure CUDA_CALL(cudaMalloc(&devIndexArray, n_isotopes * n_isotopes * n_gridpoints * sizeof(IDX_TYPE))); CUDA_CALL(cudaMalloc(&devMacroXSVector, 5 * numBlocks * threadsPerBlock * sizeof(double))); CUDA_CALL(cudaMalloc(&devNumNucs, numMats * sizeof(int))); CUDA_CALL(cudaMemcpy(devNumNucs, numNucs, numMats * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&devNuclideGrid, n_isotopes * n_gridpoints * sizeof(NuclideGridPoint))); CUDA_CALL(cudaMemcpy(devNuclideGrid, nuclideGrid[0], n_isotopes * n_gridpoints * sizeof(NuclideGridPoint), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&devResults, 5*NUM_RESULTS*sizeof(double))); // Deal with concs and Mats. Notice, when copying these to the GPU we flatten out the structure into a dense matrix { long totalElements = numMats * maxNumNucs; CUDA_CALL(cudaMalloc(&devMats, totalElements * sizeof(int))); CUDA_CALL(cudaMalloc(&devConcs, totalElements * sizeof(double))); CUDA_CALL(cudaMemset(devMats, 0, totalElements * sizeof(int))); CUDA_CALL(cudaMemset(devConcs, 0, totalElements * sizeof(double))); for(int i=0; i<numMats; i++) { CUDA_CALL(cudaMemcpy(&devMats[i*maxNumNucs], mats[i], numNucs[i]*sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(&devConcs[i*maxNumNucs], concs[i], numNucs[i]*sizeof(double), cudaMemcpyHostToDevice)); } } // Deal with Energy Grid. 
{ GridPoint_Index * lEnergyGrid; IDX_TYPE * lIndexArray; IDX_TYPE * rowptr; CUDA_CALL(cudaMalloc(&devEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index))); CUDA_CALL(cudaMallocHost(&lEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index))); printf("Allocation is %f mb\n", ((long)n_isotopes * (long)n_isotopes * (long)n_gridpoints * sizeof(IDX_TYPE))/(double) 1E6); CUDA_CALL(cudaMallocHost(&lIndexArray, n_gridpoints *n_isotopes* n_isotopes * sizeof(IDX_TYPE))); memset(lIndexArray, 0, n_gridpoints * n_isotopes * n_isotopes * sizeof(IDX_TYPE)); for(int i=0; i<n_isotopes * n_gridpoints; i++) { lEnergyGrid[i].energy = energyGrid[i].energy; lEnergyGrid[i].xs_idx = &devIndexArray[i*n_isotopes]; rowptr = &lIndexArray[i*n_isotopes]; switch(sizeof(IDX_TYPE)) { case 4: memcpy(rowptr, energyGrid[i].xs_ptrs, n_isotopes*sizeof(int)); break; case 2: for(int s=0; s<n_isotopes; s++) { rowptr[s] = (IDX_TYPE)energyGrid[i].xs_ptrs[s]; } break; default: printf("Error: sizeof(IDX_TYPE) is not supported\n"); break; } } CUDA_CALL(cudaMemcpy(devIndexArray, lIndexArray, n_isotopes*n_isotopes*n_gridpoints*sizeof(IDX_TYPE), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(devEnergyGrid, lEnergyGrid, n_isotopes * n_gridpoints * sizeof(GridPoint_Index), cudaMemcpyHostToDevice)); CUDA_CALL(cudaFreeHost(lIndexArray)); CUDA_CALL(cudaFreeHost(lEnergyGrid)); } // Run Experiments std::vector<double> perf; int dshared = 0; switch(kernelId) { case 0: perf.push_back(runExperiment(devCalculateXS_d4, "LDG OPT Double Revised singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 1: perf.push_back(runExperiment(devCalculateXS_opt_single, "LDG OPT singler", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 2: perf.push_back(runExperiment(devCalculateXS_integrated_single, "Basic Kernel singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); break; case 3: perf.push_back(runExperiment(devCalculateXS_d3, "LDG OPT Double (original) singular", numLookups, n_isotopes, n_gridpoints, devEnergyGrid, devNumNucs, numMats, maxNumNucs, devNuclideGrid, devMats, devConcs, devMacroXSVector, devEnergySearchGrid, devResults, NUM_RESULTS, cpuResults, 128, 1, dshared)); default: printf("Error: unrecognized kernel id\n"); break; } CUDA_CALL(cudaFree(devIndexArray)); CUDA_CALL(cudaFree(devNumNucs)); CUDA_CALL(cudaFree(devConcs)); CUDA_CALL(cudaFree(devMats)); if(devEnergyGrid != NULL) { CUDA_CALL(cudaFree(devEnergyGrid)); } CUDA_CALL(cudaFree(devNuclideGrid)); CUDA_CALL(cudaFree(devMacroXSVector)); std::vector<double>::iterator r = std::max_element(perf.begin(), perf.end()); printf("Max Perf is %f\n", (*r)); return (*r); }
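Comparing the .hip file above with this original .cu source shows the mechanical host-API substitutions hipify performs; every pair below is taken directly from these two files (CUDA form in the comment text, HIP replacement after the arrow).

// cudaMalloc(&p, n)                          -> hipMalloc(&p, n)
// cudaMemcpy(d, s, n, cudaMemcpyHostToDevice) -> hipMemcpy(d, s, n, hipMemcpyHostToDevice)
// cudaMallocHost(&p, n)                       -> hipHostMalloc(&p, n)
// cudaFreeHost(p)                             -> hipHostFree(p)
// cudaDeviceSynchronize()                     -> hipDeviceSynchronize()
// cudaEvent_t start, stop                     -> hipEvent_t start, stop
// kernel<<<blocks, threads, shmem>>>(args...)
//     -> hipLaunchKernelGGL(kernel, dim3(blocks), dim3(threads), shmem, 0, args...)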
cce443ae776704e9895ad4554772b51ac0c76670.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ __forceinline__ float relu(float a)
{
    return a < 0 ? 0 : a;
}

__global__ void relu_kernel(float *vec, int len)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < len) {
        vec[index] = relu(vec[index]);
    }
}
cce443ae776704e9895ad4554772b51ac0c76670.cu
#include "includes.h" __device__ __forceinline__ float relu(float a) { return a < 0 ? 0 : a; } __global__ void relu_kernel(float *vec, int len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < len) { vec[index] = relu(vec[index]); } }
975f733ed3afa4126ba8852e32edea3a373687a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../headers/gpuManager.h" typedef struct{ int e1,e2; } pair; CUDA_CALLABLE_MEMBER_DEVICE pair pair_init(int e1, int e2){ pair p; p.e1 = e1; p.e2 = e2; return p; } typedef struct{ pair* v; int max_size; int read_cursor; int write_cursor; int size; } queue; CUDA_CALLABLE_MEMBER_DEVICE queue queue_init(int size){ queue q; q.v = (pair*) malloc(sizeof(pair)*size); q.max_size = size; q.read_cursor = 0; q.write_cursor = 0; q.size=0; return q; } CUDA_CALLABLE_MEMBER_DEVICE void queue_push(queue* q, pair e){ q->v[ q->write_cursor ] = e; q->write_cursor = (q->write_cursor+1)%q->max_size; q->size+=1; } CUDA_CALLABLE_MEMBER_DEVICE pair queue_pop(queue* q){ pair p = q->v[q->read_cursor]; q->read_cursor = (q->read_cursor+1)%q->max_size; q->size-=1; return p; } CUDA_CALLABLE_MEMBER_DEVICE void queue_destroy(queue* q){ free(q->v); } template<class T> CUDA_CALLABLE_MEMBER_DEVICE void merge(T* l, bool(* comp)(T* el1, T* el2), int b1, int e1, int b2, int e2){ int size = e1-b1+1 + e2-b2+1; T* buffer = (T*) malloc(sizeof(T)*size); int cur_l = 0; int end_l = e1-b1+1; for(int i=cur_l;i<end_l;i++){ buffer[i] = l[b1+i]; } int cur_r = e1-b1+1; int end_r = e2-b1+1; for(int i=cur_r;i<end_r;i++){ buffer[i] = l[b1+i]; } int index=0; while( cur_l < end_l && cur_r < end_r ){ if( comp( &buffer[cur_l], &buffer[cur_r] ) ){ l[b1+index++] = buffer[cur_l++]; }else{ l[b1+index++] = buffer[cur_r++]; } } while( cur_l < end_l ) l[b1+index++] = buffer[cur_l++]; while( cur_r < end_r ) l[b1+index++] = buffer[cur_r++]; free(buffer); } template<class T> CUDA_CALLABLE_MEMBER_DEVICE void mergeSort(T* l, bool(* comp)(T* el1, T* el2), int begin, int end){ int size = end-begin+1; queue q1 = queue_init(size+1); queue q2 = queue_init(size+1); for(int i=begin;i<=end;i++){ queue_push(&q1, pair_init(i,i)); } queue* q = &q1; queue* qb = &q2; while(1){ if(q->size==1) break; while( q->size>0 ){ if(q->size==3){ pair pl = queue_pop(q); pair pr = queue_pop(q); pair plast = queue_pop(q); //printf("%d %d - %d %d\n",pl.e1,pl.e2, pr.e1, pr.e2); merge<T>(l, comp, pl.e1, pl.e2, pr.e1, pr.e2 ); pair presult = pair_init(pl.e1,pr.e2); merge<T>(l, comp, presult.e1, presult.e2, plast.e1, plast.e2 ); queue_push(qb, pair_init(presult.e1,plast.e2)); }else{ pair pl = queue_pop(q); pair pr = queue_pop(q); //printf("%d %d - %d %d\n",pl.e1,pl.e2, pr.e1, pr.e2); merge<T>(l, comp, pl.e1, pl.e2, pr.e1, pr.e2 ); queue_push(qb, pair_init(pl.e1,pr.e2)); } } queue* qt = q; q = qb; qb = qt; } queue_destroy(&q1); queue_destroy(&q2); //pair p = queue_pop(q); //printf("%d %d - %d\n",p.e1,p.e2, q->size); } FeatureMaskDev convertFeatureMask(FeatureMask fm){ FeatureMaskDev fmd; fmd._mask_size = fm._mask._size; fmd._location = fm._location; fmd._white_length = fm._mask._white.size(); fmd._black_length = fm._mask._black.size(); fmd._id = fm._id; for(int i=0;i<fmd._white_length;i++){ fmd._white[i]._points[0] = fm._mask._white[i]._points[0]; fmd._white[i]._points[1] = fm._mask._white[i]._points[1]; fmd._white[i]._points[2] = fm._mask._white[i]._points[2]; fmd._white[i]._points[3] = fm._mask._white[i]._points[3]; fmd._white[i]._w = fm._mask._white[i]._w; fmd._white[i]._h = fm._mask._white[i]._h; } for(int i=0;i<fmd._black_length;i++){ fmd._black[i]._points[0] = fm._mask._black[i]._points[0]; fmd._black[i]._points[1] = fm._mask._black[i]._points[1]; fmd._black[i]._points[2] = fm._mask._black[i]._points[2]; fmd._black[i]._points[3] = fm._mask._black[i]._points[3]; fmd._black[i]._w = 
fm._mask._black[i]._w; fmd._black[i]._h = fm._mask._black[i]._h; } return fmd; } CUDA_CALLABLE_MEMBER_DEVICE long IntegralImageDev::getFromData(Point* points){ long sum[4]={0,0,0,0}; for(int i=0;i<4;++i){ Point p = points[i]; if(outsideLimits(p)){ sum[i] = 0; }else{ sum[i] = _data[p.y*24+p.x]; } } return (sum[0]+sum[3]-sum[2]-sum[1]); } CUDA_CALLABLE_MEMBER_DEVICE long IntegralImageDev::computeBlock(MaskBlockDev* b, ulong mask_length, Point location){ long block_sum = 0; for(int i=0;i<mask_length;++i){ Point pa,pb,pc,pd; pa.y = location.y + b[i]._points[0].y - 1; pa.x = location.x + b[i]._points[0].x - 1; pb.y = location.y + b[i]._points[1].y - 1; pb.x = location.x + b[i]._points[1].x; pc.y = location.y + b[i]._points[2].y; pc.x = location.x + b[i]._points[2].x - 1; pd.y = location.y + b[i]._points[3].y; pd.x = location.x + b[i]._points[3].x; Point points[4] = {pa,pb,pc,pd}; // printf("%ld (%lu %lu) [ (%lu %lu) (%lu %lu) (%lu %lu) (%lu %lu) ]\n",getFromData(points),location.x,location.y,pa.y,pa.x,pb.y,pb.x,pc.y,pc.x,pd.y,pd.x); // printf("%ld (%lu %lu)",b[i]._points[0].y,b[i]._points[0].x); block_sum+=getFromData(points); } return block_sum; } CUDA_CALLABLE_MEMBER_DEVICE ulong IntegralImageDev::filter(FeatureMaskDev* fm){ if( (fm->_mask_size.x > _size.x) || (fm->_mask_size.y > _size.y) ){ return 120; } // printf("%d\n", abs( computeBlock(fm._mask._black,fm._location) - computeBlock(fm._mask._white,fm._location) ) ); return abs( computeBlock(fm->_black,fm->_black_length,fm->_location) - computeBlock(fm->_white,fm->_white_length,fm->_location) ); } void loadImage(ulong* data, const char* filepath,Point* size){ MagickWand * image_wand; PixelIterator* iterator; MagickBooleanType status; MagickPixelPacket pixel; PixelWand** pixels; long x,y; ulong line_sum; image_wand = NewMagickWand(); status=MagickReadImage(image_wand,filepath); iterator=NewPixelIterator(image_wand); size->y = MagickGetImageHeight(image_wand); size->x = MagickGetImageWidth(image_wand); // printf("MAGICK %lu %lu\n",size->y,size->x); if(data==NULL){ return; } for (y=0; y < size->y; ++y){ pixels=PixelGetNextIteratorRow(iterator,&(size->x)); if ( (pixels == (PixelWand **) NULL) ) break; line_sum=0; for (x=0; x < (long) size->x; ++x){ line_sum+= PixelGetBlue(pixels[x])*255; if(y>0){ data[y*size->x+x] = data[(y-1)*size->x+x] + line_sum; }else{ data[y*size->x+x] = line_sum; } // printf("%lu\n",(*data)[y][x]); } } DestroyPixelIterator(iterator); DestroyMagickWand(image_wand); } void prepareData(TrainingSet& ts, Point* size_image, ulong** data_host, ulong** data_device, int nfeatures, FeatureMaskDev** fmd_host, FeatureMaskDev** fmd_device, bool alloc){ int ardis_w = 24; int ardis_h = 24; int nimages = ts.size(); int totalPixels = nimages*ardis_w*ardis_h; printf("PREPARING DATA\n"); //Preparing Images if(alloc){ (*data_host) = (ulong*) malloc(sizeof(ulong)*totalPixels); hipMalloc( (void**) data_device, sizeof(ulong)*totalPixels ); } for(int i=0;i<nimages;i++){ TrainingImage* ti = ts.get(i); for(int h=0;h<ardis_h;h++){ for(int w=0;w<ardis_w;w++){ ( (*data_host)+(i*ardis_w*ardis_h) )[h*ardis_w+w] = ti->_ii->_data[h][w]; } } } /* for(int i=0;i<totalPixels;i++){ printf("%d\n", (*data_host)[i] ); } */ hipMemcpy( *data_device, *data_host, sizeof(ulong)*totalPixels, hipMemcpyHostToDevice ); //Preparing Features FacesFeatureFactory fff; if(alloc) (*fmd_host) = (FeatureMaskDev*) malloc(sizeof(FeatureMaskDev)*nfeatures); for(int i=0;i<nfeatures;i++){ (*fmd_host)[i] = convertFeatureMask(fff._facesFeatures[i]); } if(alloc) hipMalloc( (void**) 
fmd_device, sizeof(FeatureMaskDev)*nfeatures ); hipMemcpy( (*fmd_device), (*fmd_host), sizeof(FeatureMaskDev)*nfeatures, hipMemcpyHostToDevice ); } __global__ void kernelFilter(ulong* data, Point size, int nimages, FeatureMaskDev* fmd,int startFeature, int stepFeature, int nfeatures, ulong* answer){ /* int factorF = stepFeature/712 + 1; int featDe = threadIdx.x*factorF; int featAte = (threadIdx.x+1)*factorF; int factorI = nimages/16000 + 1; int imgDe = blockIdx.x*factorI; int imgAte = (blockIdx.x+1)*factorI; */ int factorF = stepFeature/gridDim.x + 1; int featDe = blockIdx.x*factorF; int featAte = (blockIdx.x+1)*factorF; int factorI = nimages/blockDim.x + 1; int imgDe = threadIdx.x*factorI; int imgAte = (threadIdx.x+1)*factorI; if(featDe < stepFeature && imgDe < nimages){ if(featAte>stepFeature) featAte = stepFeature; if(imgAte>nimages) imgAte = nimages; for(int featId=featDe;featId<featAte;featId++){ if( (featId+startFeature)>=nfeatures) break; for(int imgId=imgDe;imgId<imgAte;imgId++){ IntegralImageDev ii( &(data[24*24*imgId]) ,size); answer[featId*nimages+imgId] = ii.filter( &(fmd[featId+startFeature]) ); //answer[threadIdx.x] = ii.filter( &(fmd[0]) ); } } } /* answer[0] = data[0]; answer[1] = data[1]; answer[2] = data[2]; answer[3] = data[3]; */ } int callCUDA(ulong* data_device, Point size_image, int nimages, FeatureMaskDev* fmd_device, int startFeature, int stepFeature, int nfeatures, int sizeAnswer, ulong* answer_host, ulong* answer_device){ Logger::cuda->log("KERNEL CALL %d %d %d %d %d %d %d\n",size_image.x,size_image.y,nimages,startFeature,stepFeature,nfeatures,sizeAnswer); hipLaunchKernelGGL(( kernelFilter), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, data_device, size_image, nimages, fmd_device, startFeature, stepFeature, nfeatures, answer_device); hipMemcpy( answer_host, answer_device, sizeof(ulong)*sizeAnswer, hipMemcpyDeviceToHost ); Logger::cuda->log("END CALL\n"); hipError_t error = hipGetLastError(); if(error != hipSuccess){ // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); } } void* gpuThread(void* vp){ GPUManager* manager = (GPUManager*) vp; Logger::cuda->log("BEGINBEGUNBEGIN\n"); while(manager->END_STAGE==0){ if(manager->CONSUMED_BUFFER==1){ Logger::cuda->log("BUFFER LOAD PROCESS BEGIN\n"); pthread_mutex_lock( &(manager->M) ); Logger::cuda->log("INSIDE LOCK\n"); int from,to; GPUBuffer* buffer = manager->getConsumedBuffer(&from,&to); pthread_mutex_unlock( &(manager->M) ); manager->fillBuffer(buffer,from,to); Logger::cuda->log("BUFFER LOAD PROCESS END\n"); } } Logger::cuda->log("FIMFIMFIM\n"); pthread_exit(NULL); } void GPUManager::wait(){ pthread_join( threads[0], NULL ); } GPUManager::GPUManager(TrainingSet& ts, int totalFeatures, int nimages):_total_features(totalFeatures),_nimages(nimages){ _feat_per_buffer = SINGLE_BUFFER_STEP_FEATURE/GPU_BUFFER + 1; _max_stage = totalFeatures/_feat_per_buffer + 1; size_image.x = 24; size_image.y = 24; printf("GPU BUFFER %d\n",Config::CUDA_BUFFER); buffers = (GPUBuffer**) malloc(sizeof(GPUBuffer*)*GPU_BUFFER); for(int i=0;i<GPU_BUFFER;i++){ buffers[i] = new GPUBuffer(i,_feat_per_buffer, nimages); } resetManager(); prepareData(ts, &size_image, &data_host, &data_device, totalFeatures, &fmd_host, &fmd_device, true); } void GPUManager::resetManager(){ _cur_stage = 0; _buffer_chunk = 0; _get_filled_counter = 0; while(!consumed_buffers.empty()) consumed_buffers.pop(); while(!filled_buffers.empty()) filled_buffers.pop(); for(int i=0;i<GPU_BUFFER;i++){ consumed_buffers.push(buffers[i]); } 
CONSUMED_BUFFER = true; END_STAGE=false; } void GPUManager::resetImageData(TrainingSet& ts){ _nimages = ts.size(); Logger::cuda->log("NEW SIZE %d\n",_nimages); free(data_host); free(fmd_host); hipFree(data_device); hipFree(fmd_device); for(int i=0;i<GPU_BUFFER;i++){ Logger::cuda->log("REMOVING BUFFER\n"); delete buffers[i]; buffers[i] = new GPUBuffer(i,_feat_per_buffer, _nimages); } resetManager(); prepareData(ts, &size_image, &data_host, &data_device, _total_features, &fmd_host, &fmd_device, true); } void GPUManager::restart(){ Logger::cuda->log("INITIALIZING GPU MANAGER...START THREAD\n"); pthread_create(&(threads[0]),NULL, gpuThread, this); } GPUBuffer* GPUManager::getConsumedBuffer(int* from, int* to){ if(consumed_buffers.size()==0) return NULL; if(consumed_buffers.size()==1){ CONSUMED_BUFFER = false; Logger::cuda->log("CONSUMED FALSE %d\n", CONSUMED_BUFFER); } GPUBuffer* b = consumed_buffers.front(); consumed_buffers.pop(); *from = _cur_stage*SINGLE_BUFFER_STEP_FEATURE + b->_id*_feat_per_buffer; *to = _cur_stage*SINGLE_BUFFER_STEP_FEATURE + (b->_id+1)*_feat_per_buffer; if(_buffer_chunk == GPU_BUFFER-1){ _cur_stage = _cur_stage+1; } _buffer_chunk = (_buffer_chunk+1)%GPU_BUFFER; if(*to>_total_features) *to=_total_features; b->_from = *from; b->_to = *to; return b; } void GPUManager::bufferHasBeenConsumed(GPUBuffer* b){ consumed_buffers.push(b); pthread_mutex_lock( &(M) ); CONSUMED_BUFFER = true; Logger::cuda->log("CONSUMED TRUE %d\n", CONSUMED_BUFFER); pthread_mutex_unlock( &(M) ); } void GPUManager::fillBuffer(GPUBuffer* b, int from, int to){ Logger::cuda->log("CALL CUDA BUFFER ID: %d\n",b->_id); if(from<_total_features){ callCUDA(data_device, size_image, _nimages, fmd_device, from, _feat_per_buffer, _total_features, b->_size, b->answer_host, b->answer_device); } filled_buffers.push(b); } GPUBuffer* GPUManager::getFilledBuffer(){ if(filled_buffers.size()==0) return NULL; Logger::cuda->log("%d - %d\n",_get_filled_counter,_max_stage); if( _get_filled_counter==(_max_stage-1) ) END_STAGE = true; _get_filled_counter= (_get_filled_counter+1)%(_max_stage); GPUBuffer* b = filled_buffers.front(); filled_buffers.pop(); return b; }
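kernelFilter in the file above splits the feature range across blocks and the image range across threads. The worked example below traces that index arithmetic with hypothetical launch numbers (chosen only for illustration; the real values come from GRID_SIZE, BLOCK_SIZE, and the buffer sizes).

// Hypothetical launch: gridDim.x = 128 blocks, blockDim.x = 256 threads,
// stepFeature = 1000 features in this chunk, nimages = 5000 images.
// factorF = stepFeature / gridDim.x + 1 = 1000 / 128 + 1 = 8    features per block
// featDe  = blockIdx.x * factorF            (block 3 -> features 24..31)
// factorI = nimages / blockDim.x + 1       = 5000 / 256 + 1 = 20 images per thread
// imgDe   = threadIdx.x * factorI           (thread 10 -> images 200..219)
// The clamps featAte > stepFeature and imgAte > nimages trim the last block/thread,
// and the (featId + startFeature) >= nfeatures check stops at the global feature count.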
975f733ed3afa4126ba8852e32edea3a373687a7.cu
#include "../headers/gpuManager.h" typedef struct{ int e1,e2; } pair; CUDA_CALLABLE_MEMBER_DEVICE pair pair_init(int e1, int e2){ pair p; p.e1 = e1; p.e2 = e2; return p; } typedef struct{ pair* v; int max_size; int read_cursor; int write_cursor; int size; } queue; CUDA_CALLABLE_MEMBER_DEVICE queue queue_init(int size){ queue q; q.v = (pair*) malloc(sizeof(pair)*size); q.max_size = size; q.read_cursor = 0; q.write_cursor = 0; q.size=0; return q; } CUDA_CALLABLE_MEMBER_DEVICE void queue_push(queue* q, pair e){ q->v[ q->write_cursor ] = e; q->write_cursor = (q->write_cursor+1)%q->max_size; q->size+=1; } CUDA_CALLABLE_MEMBER_DEVICE pair queue_pop(queue* q){ pair p = q->v[q->read_cursor]; q->read_cursor = (q->read_cursor+1)%q->max_size; q->size-=1; return p; } CUDA_CALLABLE_MEMBER_DEVICE void queue_destroy(queue* q){ free(q->v); } template<class T> CUDA_CALLABLE_MEMBER_DEVICE void merge(T* l, bool(* comp)(T* el1, T* el2), int b1, int e1, int b2, int e2){ int size = e1-b1+1 + e2-b2+1; T* buffer = (T*) malloc(sizeof(T)*size); int cur_l = 0; int end_l = e1-b1+1; for(int i=cur_l;i<end_l;i++){ buffer[i] = l[b1+i]; } int cur_r = e1-b1+1; int end_r = e2-b1+1; for(int i=cur_r;i<end_r;i++){ buffer[i] = l[b1+i]; } int index=0; while( cur_l < end_l && cur_r < end_r ){ if( comp( &buffer[cur_l], &buffer[cur_r] ) ){ l[b1+index++] = buffer[cur_l++]; }else{ l[b1+index++] = buffer[cur_r++]; } } while( cur_l < end_l ) l[b1+index++] = buffer[cur_l++]; while( cur_r < end_r ) l[b1+index++] = buffer[cur_r++]; free(buffer); } template<class T> CUDA_CALLABLE_MEMBER_DEVICE void mergeSort(T* l, bool(* comp)(T* el1, T* el2), int begin, int end){ int size = end-begin+1; queue q1 = queue_init(size+1); queue q2 = queue_init(size+1); for(int i=begin;i<=end;i++){ queue_push(&q1, pair_init(i,i)); } queue* q = &q1; queue* qb = &q2; while(1){ if(q->size==1) break; while( q->size>0 ){ if(q->size==3){ pair pl = queue_pop(q); pair pr = queue_pop(q); pair plast = queue_pop(q); //printf("%d %d - %d %d\n",pl.e1,pl.e2, pr.e1, pr.e2); merge<T>(l, comp, pl.e1, pl.e2, pr.e1, pr.e2 ); pair presult = pair_init(pl.e1,pr.e2); merge<T>(l, comp, presult.e1, presult.e2, plast.e1, plast.e2 ); queue_push(qb, pair_init(presult.e1,plast.e2)); }else{ pair pl = queue_pop(q); pair pr = queue_pop(q); //printf("%d %d - %d %d\n",pl.e1,pl.e2, pr.e1, pr.e2); merge<T>(l, comp, pl.e1, pl.e2, pr.e1, pr.e2 ); queue_push(qb, pair_init(pl.e1,pr.e2)); } } queue* qt = q; q = qb; qb = qt; } queue_destroy(&q1); queue_destroy(&q2); //pair p = queue_pop(q); //printf("%d %d - %d\n",p.e1,p.e2, q->size); } FeatureMaskDev convertFeatureMask(FeatureMask fm){ FeatureMaskDev fmd; fmd._mask_size = fm._mask._size; fmd._location = fm._location; fmd._white_length = fm._mask._white.size(); fmd._black_length = fm._mask._black.size(); fmd._id = fm._id; for(int i=0;i<fmd._white_length;i++){ fmd._white[i]._points[0] = fm._mask._white[i]._points[0]; fmd._white[i]._points[1] = fm._mask._white[i]._points[1]; fmd._white[i]._points[2] = fm._mask._white[i]._points[2]; fmd._white[i]._points[3] = fm._mask._white[i]._points[3]; fmd._white[i]._w = fm._mask._white[i]._w; fmd._white[i]._h = fm._mask._white[i]._h; } for(int i=0;i<fmd._black_length;i++){ fmd._black[i]._points[0] = fm._mask._black[i]._points[0]; fmd._black[i]._points[1] = fm._mask._black[i]._points[1]; fmd._black[i]._points[2] = fm._mask._black[i]._points[2]; fmd._black[i]._points[3] = fm._mask._black[i]._points[3]; fmd._black[i]._w = fm._mask._black[i]._w; fmd._black[i]._h = fm._mask._black[i]._h; } return fmd; } 
CUDA_CALLABLE_MEMBER_DEVICE long IntegralImageDev::getFromData(Point* points){ long sum[4]={0,0,0,0}; for(int i=0;i<4;++i){ Point p = points[i]; if(outsideLimits(p)){ sum[i] = 0; }else{ sum[i] = _data[p.y*24+p.x]; } } return (sum[0]+sum[3]-sum[2]-sum[1]); } CUDA_CALLABLE_MEMBER_DEVICE long IntegralImageDev::computeBlock(MaskBlockDev* b, ulong mask_length, Point location){ long block_sum = 0; for(int i=0;i<mask_length;++i){ Point pa,pb,pc,pd; pa.y = location.y + b[i]._points[0].y - 1; pa.x = location.x + b[i]._points[0].x - 1; pb.y = location.y + b[i]._points[1].y - 1; pb.x = location.x + b[i]._points[1].x; pc.y = location.y + b[i]._points[2].y; pc.x = location.x + b[i]._points[2].x - 1; pd.y = location.y + b[i]._points[3].y; pd.x = location.x + b[i]._points[3].x; Point points[4] = {pa,pb,pc,pd}; // printf("%ld (%lu %lu) [ (%lu %lu) (%lu %lu) (%lu %lu) (%lu %lu) ]\n",getFromData(points),location.x,location.y,pa.y,pa.x,pb.y,pb.x,pc.y,pc.x,pd.y,pd.x); // printf("%ld (%lu %lu)",b[i]._points[0].y,b[i]._points[0].x); block_sum+=getFromData(points); } return block_sum; } CUDA_CALLABLE_MEMBER_DEVICE ulong IntegralImageDev::filter(FeatureMaskDev* fm){ if( (fm->_mask_size.x > _size.x) || (fm->_mask_size.y > _size.y) ){ return 120; } // printf("%d\n", abs( computeBlock(fm._mask._black,fm._location) - computeBlock(fm._mask._white,fm._location) ) ); return abs( computeBlock(fm->_black,fm->_black_length,fm->_location) - computeBlock(fm->_white,fm->_white_length,fm->_location) ); } void loadImage(ulong* data, const char* filepath,Point* size){ MagickWand * image_wand; PixelIterator* iterator; MagickBooleanType status; MagickPixelPacket pixel; PixelWand** pixels; long x,y; ulong line_sum; image_wand = NewMagickWand(); status=MagickReadImage(image_wand,filepath); iterator=NewPixelIterator(image_wand); size->y = MagickGetImageHeight(image_wand); size->x = MagickGetImageWidth(image_wand); // printf("MAGICK %lu %lu\n",size->y,size->x); if(data==NULL){ return; } for (y=0; y < size->y; ++y){ pixels=PixelGetNextIteratorRow(iterator,&(size->x)); if ( (pixels == (PixelWand **) NULL) ) break; line_sum=0; for (x=0; x < (long) size->x; ++x){ line_sum+= PixelGetBlue(pixels[x])*255; if(y>0){ data[y*size->x+x] = data[(y-1)*size->x+x] + line_sum; }else{ data[y*size->x+x] = line_sum; } // printf("%lu\n",(*data)[y][x]); } } DestroyPixelIterator(iterator); DestroyMagickWand(image_wand); } void prepareData(TrainingSet& ts, Point* size_image, ulong** data_host, ulong** data_device, int nfeatures, FeatureMaskDev** fmd_host, FeatureMaskDev** fmd_device, bool alloc){ int ardis_w = 24; int ardis_h = 24; int nimages = ts.size(); int totalPixels = nimages*ardis_w*ardis_h; printf("PREPARING DATA\n"); //Preparing Images if(alloc){ (*data_host) = (ulong*) malloc(sizeof(ulong)*totalPixels); cudaMalloc( (void**) data_device, sizeof(ulong)*totalPixels ); } for(int i=0;i<nimages;i++){ TrainingImage* ti = ts.get(i); for(int h=0;h<ardis_h;h++){ for(int w=0;w<ardis_w;w++){ ( (*data_host)+(i*ardis_w*ardis_h) )[h*ardis_w+w] = ti->_ii->_data[h][w]; } } } /* for(int i=0;i<totalPixels;i++){ printf("%d\n", (*data_host)[i] ); } */ cudaMemcpy( *data_device, *data_host, sizeof(ulong)*totalPixels, cudaMemcpyHostToDevice ); //Preparing Features FacesFeatureFactory fff; if(alloc) (*fmd_host) = (FeatureMaskDev*) malloc(sizeof(FeatureMaskDev)*nfeatures); for(int i=0;i<nfeatures;i++){ (*fmd_host)[i] = convertFeatureMask(fff._facesFeatures[i]); } if(alloc) cudaMalloc( (void**) fmd_device, sizeof(FeatureMaskDev)*nfeatures ); cudaMemcpy( (*fmd_device), 
(*fmd_host), sizeof(FeatureMaskDev)*nfeatures, cudaMemcpyHostToDevice ); } __global__ void kernelFilter(ulong* data, Point size, int nimages, FeatureMaskDev* fmd,int startFeature, int stepFeature, int nfeatures, ulong* answer){ /* int factorF = stepFeature/712 + 1; int featDe = threadIdx.x*factorF; int featAte = (threadIdx.x+1)*factorF; int factorI = nimages/16000 + 1; int imgDe = blockIdx.x*factorI; int imgAte = (blockIdx.x+1)*factorI; */ int factorF = stepFeature/gridDim.x + 1; int featDe = blockIdx.x*factorF; int featAte = (blockIdx.x+1)*factorF; int factorI = nimages/blockDim.x + 1; int imgDe = threadIdx.x*factorI; int imgAte = (threadIdx.x+1)*factorI; if(featDe < stepFeature && imgDe < nimages){ if(featAte>stepFeature) featAte = stepFeature; if(imgAte>nimages) imgAte = nimages; for(int featId=featDe;featId<featAte;featId++){ if( (featId+startFeature)>=nfeatures) break; for(int imgId=imgDe;imgId<imgAte;imgId++){ IntegralImageDev ii( &(data[24*24*imgId]) ,size); answer[featId*nimages+imgId] = ii.filter( &(fmd[featId+startFeature]) ); //answer[threadIdx.x] = ii.filter( &(fmd[0]) ); } } } /* answer[0] = data[0]; answer[1] = data[1]; answer[2] = data[2]; answer[3] = data[3]; */ } int callCUDA(ulong* data_device, Point size_image, int nimages, FeatureMaskDev* fmd_device, int startFeature, int stepFeature, int nfeatures, int sizeAnswer, ulong* answer_host, ulong* answer_device){ Logger::cuda->log("KERNEL CALL %d %d %d %d %d %d %d\n",size_image.x,size_image.y,nimages,startFeature,stepFeature,nfeatures,sizeAnswer); kernelFilter<<<GRID_SIZE,BLOCK_SIZE>>>(data_device, size_image, nimages, fmd_device, startFeature, stepFeature, nfeatures, answer_device); cudaMemcpy( answer_host, answer_device, sizeof(ulong)*sizeAnswer, cudaMemcpyDeviceToHost ); Logger::cuda->log("END CALL\n"); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess){ // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); } } void* gpuThread(void* vp){ GPUManager* manager = (GPUManager*) vp; Logger::cuda->log("BEGINBEGUNBEGIN\n"); while(manager->END_STAGE==0){ if(manager->CONSUMED_BUFFER==1){ Logger::cuda->log("BUFFER LOAD PROCESS BEGIN\n"); pthread_mutex_lock( &(manager->M) ); Logger::cuda->log("INSIDE LOCK\n"); int from,to; GPUBuffer* buffer = manager->getConsumedBuffer(&from,&to); pthread_mutex_unlock( &(manager->M) ); manager->fillBuffer(buffer,from,to); Logger::cuda->log("BUFFER LOAD PROCESS END\n"); } } Logger::cuda->log("FIMFIMFIM\n"); pthread_exit(NULL); } void GPUManager::wait(){ pthread_join( threads[0], NULL ); } GPUManager::GPUManager(TrainingSet& ts, int totalFeatures, int nimages):_total_features(totalFeatures),_nimages(nimages){ _feat_per_buffer = SINGLE_BUFFER_STEP_FEATURE/GPU_BUFFER + 1; _max_stage = totalFeatures/_feat_per_buffer + 1; size_image.x = 24; size_image.y = 24; printf("GPU BUFFER %d\n",Config::CUDA_BUFFER); buffers = (GPUBuffer**) malloc(sizeof(GPUBuffer*)*GPU_BUFFER); for(int i=0;i<GPU_BUFFER;i++){ buffers[i] = new GPUBuffer(i,_feat_per_buffer, nimages); } resetManager(); prepareData(ts, &size_image, &data_host, &data_device, totalFeatures, &fmd_host, &fmd_device, true); } void GPUManager::resetManager(){ _cur_stage = 0; _buffer_chunk = 0; _get_filled_counter = 0; while(!consumed_buffers.empty()) consumed_buffers.pop(); while(!filled_buffers.empty()) filled_buffers.pop(); for(int i=0;i<GPU_BUFFER;i++){ consumed_buffers.push(buffers[i]); } CONSUMED_BUFFER = true; END_STAGE=false; } void GPUManager::resetImageData(TrainingSet& ts){ _nimages = ts.size(); 
Logger::cuda->log("NEW SIZE %d\n",_nimages); free(data_host); free(fmd_host); cudaFree(data_device); cudaFree(fmd_device); for(int i=0;i<GPU_BUFFER;i++){ Logger::cuda->log("REMOVING BUFFER\n"); delete buffers[i]; buffers[i] = new GPUBuffer(i,_feat_per_buffer, _nimages); } resetManager(); prepareData(ts, &size_image, &data_host, &data_device, _total_features, &fmd_host, &fmd_device, true); } void GPUManager::restart(){ Logger::cuda->log("INITIALIZING GPU MANAGER...START THREAD\n"); pthread_create(&(threads[0]),NULL, gpuThread, this); } GPUBuffer* GPUManager::getConsumedBuffer(int* from, int* to){ if(consumed_buffers.size()==0) return NULL; if(consumed_buffers.size()==1){ CONSUMED_BUFFER = false; Logger::cuda->log("CONSUMED FALSE %d\n", CONSUMED_BUFFER); } GPUBuffer* b = consumed_buffers.front(); consumed_buffers.pop(); *from = _cur_stage*SINGLE_BUFFER_STEP_FEATURE + b->_id*_feat_per_buffer; *to = _cur_stage*SINGLE_BUFFER_STEP_FEATURE + (b->_id+1)*_feat_per_buffer; if(_buffer_chunk == GPU_BUFFER-1){ _cur_stage = _cur_stage+1; } _buffer_chunk = (_buffer_chunk+1)%GPU_BUFFER; if(*to>_total_features) *to=_total_features; b->_from = *from; b->_to = *to; return b; } void GPUManager::bufferHasBeenConsumed(GPUBuffer* b){ consumed_buffers.push(b); pthread_mutex_lock( &(M) ); CONSUMED_BUFFER = true; Logger::cuda->log("CONSUMED TRUE %d\n", CONSUMED_BUFFER); pthread_mutex_unlock( &(M) ); } void GPUManager::fillBuffer(GPUBuffer* b, int from, int to){ Logger::cuda->log("CALL CUDA BUFFER ID: %d\n",b->_id); if(from<_total_features){ callCUDA(data_device, size_image, _nimages, fmd_device, from, _feat_per_buffer, _total_features, b->_size, b->answer_host, b->answer_device); } filled_buffers.push(b); } GPUBuffer* GPUManager::getFilledBuffer(){ if(filled_buffers.size()==0) return NULL; Logger::cuda->log("%d - %d\n",_get_filled_counter,_max_stage); if( _get_filled_counter==(_max_stage-1) ) END_STAGE = true; _get_filled_counter= (_get_filled_counter+1)%(_max_stage); GPUBuffer* b = filled_buffers.front(); filled_buffers.pop(); return b; }
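In kernelFilter above, each block owns a contiguous slice of the feature range and each thread within the block a contiguous slice of the images, with both slices clamped at the end of the workload. The host-only sketch below reproduces that index arithmetic for one block/thread pair; the launch shape (64 blocks of 256 threads) and the workload sizes are illustrative assumptions, since GRID_SIZE and BLOCK_SIZE are defined outside the files shown here.

#include <cstdio>

int main() {
  // Illustrative values only; the real GRID_SIZE/BLOCK_SIZE come from headers not shown here.
  const int gridDim_x = 64, blockDim_x = 256;
  const int stepFeature = 1000, nimages = 5000;

  const int factorF = stepFeature / gridDim_x + 1;  // features per block (16 here)
  const int factorI = nimages / blockDim_x + 1;     // images per thread (20 here)

  // Range handled by block 3, thread 7, clamped exactly as in kernelFilter.
  const int blockIdx_x = 3, threadIdx_x = 7;
  int featDe = blockIdx_x * factorF, featAte = (blockIdx_x + 1) * factorF;
  int imgDe  = threadIdx_x * factorI, imgAte  = (threadIdx_x + 1) * factorI;
  if (featAte > stepFeature) featAte = stepFeature;
  if (imgAte  > nimages)     imgAte  = nimages;

  // Prints: block 3 -> features [48, 64), thread 7 -> images [140, 160)
  std::printf("block %d -> features [%d, %d), thread %d -> images [%d, %d)\n",
              blockIdx_x, featDe, featAte, threadIdx_x, imgDe, imgAte);
  return 0;
}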
1a7c6d1bd704834fb3cf95b402161f716337c9f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************************************** * * Computer Engineering Group, Heidelberg University - GPU Computing Exercise 05 * * Group : TODO * * File : main.cu * * Purpose : Naive Matrix Multiplication * *************************************************************************************************/ #include <cmath> #include <iostream> #include <cstdlib> #include <ctime> #include <chCommandLine.h> #include <chTimer.hpp> #include "mmult_cpu.h" const static int DEFAULT_MATRIX_WIDTH = 1024; const static int DEFAULT_BLOCK_DIM = 32; // // Function Prototypes // void printHelp(char * /*programName*/); // // matMul_Kernel // __global__ void matMul_Kernel(int matrixSize, float* matrixA, float* matrixB, float* matrixC) { float fElementSum, fXelement, fYelement; int elementIdx = blockIdx.x * blockDim.x + threadIdx.x; int elementIdy = blockIdx.y * blockDim.y + threadIdx.y; int elementId = elementIdx * matrixSize + elementIdy; if (elementIdx < matrixSize && elementIdy < matrixSize) { fElementSum = 0.f; for(int i = 0; i < matrixSize; ++i) { fXelement = matrixA[elementIdx * matrixSize + i]; fYelement = matrixB[i * matrixSize + elementIdy]; fElementSum += fXelement * fYelement; } matrixC[elementId] = fElementSum; } } // // Shared matMul_Kernel // __global__ void shMatMul_Kernel(int matrixSize, float* matrixA, float* matrixB, float* matrixC) { extern __shared__ float sh_Mem[]; int tilewidth = blockDim.x; float *sh_MatrixA = &(sh_Mem[0]); float *sh_MatrixB = &(sh_Mem[1*tilewidth*tilewidth]); //float *sh_MatrixC= &(sh_Mem[2*tilewidth*tilewidth]); int elementIdx = blockIdx.x * blockDim.x + threadIdx.x; // Col int elementIdy = blockIdx.y * blockDim.y + threadIdx.y; // Row int elementId = elementIdy * matrixSize + elementIdx; float CValue = 0; if (elementIdx < matrixSize && elementIdy < matrixSize) { for(int m=0; m < (matrixSize/tilewidth); ++m) { sh_MatrixA[tilewidth*threadIdx.y + threadIdx.x] = matrixA[elementIdy*matrixSize + (m*tilewidth+threadIdx.x)]; sh_MatrixB[tilewidth*threadIdx.y + threadIdx.x] = matrixB[elementIdx + (m*tilewidth+threadIdx.y)*matrixSize]; __syncthreads(); for(int k=0; k<tilewidth; ++k) CValue += sh_MatrixA[tilewidth*threadIdx.y + k] * sh_MatrixB[tilewidth*k + threadIdx.x]; __syncthreads(); } matrixC[elementId] = CValue; } } // // Main // int main(int argc, char * argv[]) { // // Show Help // bool showHelp = chCommandLineGetBool("h", argc, argv); if (!showHelp) { showHelp = chCommandLineGetBool("help", argc, argv); } if (showHelp) { printHelp(argv[0]); exit(0); } std::cout << "***" << std::endl << "*** Starting ..." << std::endl << "***" << std::endl; ChTimer memCpyH2DTimer, memCpyD2HTimer; ChTimer kernelTimer, hostTimer; // // Allocate Memory // int matrixWidth = 0; chCommandLineGet<int>(&matrixWidth, "s", argc, argv); chCommandLineGet<int>(&matrixWidth, "size", argc, argv); matrixWidth = matrixWidth != 0 ? 
matrixWidth : DEFAULT_MATRIX_WIDTH; int matrixSize = matrixWidth * matrixWidth; // // Host Memory // bool pinnedMemory = chCommandLineGetBool("p", argc, argv); if (!pinnedMemory) { pinnedMemory = chCommandLineGetBool("pinned-memory",argc,argv); } float* h_matrixA = NULL; float* h_matrixB = NULL; float* h_matrixC = NULL; if (!pinnedMemory) { // Pageable h_matrixA = static_cast<float*>(malloc( static_cast<size_t>(matrixSize * sizeof(*h_matrixA)))); h_matrixB = static_cast<float*>(malloc( static_cast<size_t>(matrixSize * sizeof(*h_matrixB)))); h_matrixC = static_cast<float*>(calloc( static_cast<size_t>(matrixSize), sizeof *h_matrixC)); } else { // Pinned hipHostMalloc(&h_matrixA, static_cast<size_t>(matrixSize * sizeof(*h_matrixA))); hipHostMalloc(&h_matrixB, static_cast<size_t>(matrixSize * sizeof(*h_matrixB))); hipHostMalloc(&h_matrixC, static_cast<size_t>(matrixSize * sizeof(*h_matrixC))); memset ( h_matrixC, 0, matrixSize * sizeof(*h_matrixC) ); } // // Device Memory // float* d_matrixA = NULL; float* d_matrixB = NULL; float* d_matrixC = NULL; hipMalloc(&d_matrixA, static_cast<size_t>(matrixSize * sizeof(*d_matrixA))); hipMalloc(&d_matrixB, static_cast<size_t>(matrixSize * sizeof(*d_matrixB))); hipMalloc(&d_matrixC, static_cast<size_t>(matrixSize * sizeof(*d_matrixC))); // // Check Pointers // if (h_matrixA == NULL || h_matrixB == NULL || h_matrixC == NULL || d_matrixA == NULL || d_matrixB == NULL || d_matrixC == NULL ) { std::cout << "\033[31m***" << std::endl << "*** Error - Allocation of Memory failed!!!" << std::endl << "***\033[0m" << std::endl; exit(-1); } // // Init Matrices // for (int i = 0; i < matrixSize; i++) { int x = i % matrixWidth; int y = i / matrixWidth; h_matrixA[i] = static_cast<float>(x * y); h_matrixB[i] = static_cast<float>(x + y); } // // Copy Data to the Device // memCpyH2DTimer.start(); hipMemcpy(d_matrixA, h_matrixA, static_cast<size_t>(matrixSize * sizeof(*d_matrixA)), hipMemcpyHostToDevice); hipMemcpy(d_matrixB, h_matrixB, static_cast<size_t>(matrixSize * sizeof(*d_matrixB)), hipMemcpyHostToDevice); memCpyH2DTimer.stop(); // // Get Kernel Launch Parameters // int blockSize = 0, gridSize = 0; // Block Dimension / Threads per Block chCommandLineGet<int>(&blockSize,"t", argc, argv); chCommandLineGet<int>(&blockSize,"threads-per-block", argc, argv); blockSize = blockSize != 0 ? 
blockSize : DEFAULT_BLOCK_DIM; if (blockSize > 32) { std::cout << "\033[31m***" << std::endl << "*** Error - The number of threads per block is too big" << std::endl << "***\033[0m" << std::endl; exit(-1); } gridSize = ceil(static_cast<float>(matrixWidth) / static_cast<float>(blockSize)); dim3 grid_dim = dim3(gridSize, gridSize, 1); dim3 block_dim = dim3(blockSize, blockSize, 1); std::cout << "***" << std::endl << "*** Grid Dim: " << grid_dim.x << "x" << grid_dim.y << "x" << grid_dim.z << std::endl << "*** Block Dim: " << block_dim.x << "x" << block_dim.y << "x" << block_dim.z << std::endl << "***" << std::endl; int sharedMemSize = 2 * blockSize * blockSize * sizeof(float); kernelTimer.start(); // // Launch Kernel // FIX: option '-shared' calculates now the shared version, // devault: calcutates the device memory version // if (!chCommandLineGetBool("shared", argc, argv)) { //std::cout << "calculating naive version\n"; hipLaunchKernelGGL(( matMul_Kernel), dim3(grid_dim), dim3(block_dim), 0, 0, matrixWidth, d_matrixA, d_matrixB, d_matrixC); } else { hipLaunchKernelGGL(( shMatMul_Kernel), dim3(grid_dim), dim3(block_dim), sharedMemSize, 0, matrixWidth, d_matrixA, d_matrixB, d_matrixC); } // // Synchronize // hipDeviceSynchronize(); // // Check for Errors // hipError_t hipError_t = hipGetLastError(); if ( hipError_t != hipSuccess ) { std::cout << "\033[31m***" << std::endl << "***ERROR*** " << hipError_t << " - " << hipGetErrorString(hipError_t) << std::endl << "***\033[0m" << std::endl; return -1; } kernelTimer.stop(); // // Copy Back Data // memCpyD2HTimer.start(); hipMemcpy(h_matrixC, d_matrixC, static_cast<size_t>(matrixSize * sizeof(*d_matrixC)), hipMemcpyDeviceToHost); memCpyD2HTimer.stop(); // // Check Result // bool dontCheckResult = chCommandLineGetBool("c", argc, argv); if (!dontCheckResult) { dontCheckResult = chCommandLineGetBool("no-check", argc, argv); } if (!dontCheckResult) { float* h_matrixD = static_cast<float*>( calloc(static_cast<size_t>(matrixSize), sizeof(*h_matrixD))); hostTimer.start(); MatrixMulOnHostBlocked(h_matrixA, h_matrixB, h_matrixD, static_cast<long>(matrixWidth), 32); hostTimer.stop(); bool resultOk = MatrixCompare(h_matrixC, h_matrixD, static_cast<long>(matrixWidth)); if (!resultOk) { std::cout << "\033[31m***" << std::endl << "*** Error - The two matrices are different!!!" 
<< std::endl << "***\033[0m" << std::endl; exit(-1); } free(h_matrixD); } // // Print Meassurement Results // std::cout << "***" << std::endl << "*** Results:" << std::endl << "*** Matrix Size: " << matrixSize << std::endl << "*** Time to Copy to Device: " << 1e3 * memCpyH2DTimer.getTime() << " ms" << std::endl << "*** Copy Bandwidth: " << 1e-9 * memCpyH2DTimer.getBandwidth(2 * matrixSize * sizeof(*h_matrixA)) << " GB/s" << std::endl << "*** Time to Copy from Device: " << 1e3 * memCpyD2HTimer.getTime() << " ms" << std::endl << "*** Copy Bandwidth: " << 1e-9 * memCpyD2HTimer.getBandwidth(matrixSize * sizeof(*h_matrixA)) << " GB/s" << std::endl << "*** Time for Matrix Multiplication: " << 1e3 * kernelTimer.getTime() << " ms" << std::endl << "*** CPU Version : " << 1e3 * hostTimer.getTime() << " ms" << std::endl << "*** Overall Time (a+b+c): " << 1e3 * (memCpyH2DTimer.getTime() + memCpyD2HTimer.getTime() + kernelTimer.getTime()) << " ms " << std::endl << "*** Speed-Up compared to host (with movements) : " << hostTimer.getTime()/(memCpyH2DTimer.getTime() + memCpyD2HTimer.getTime() + kernelTimer.getTime()) << std::endl << "*** Speed-Up compared to host (without movements) : " << hostTimer.getTime()/kernelTimer.getTime() << std::endl << "***" << std::endl; if (chCommandLineGetBool("print-matrix", argc, argv) && matrixWidth <= 16) { printOutMatrix(h_matrixC, matrixWidth); } // Free Memory if (!pinnedMemory) { free(h_matrixA); free(h_matrixB); free(h_matrixC); } else { hipHostFree(h_matrixA); hipHostFree(h_matrixB); hipHostFree(h_matrixC); } hipFree(d_matrixA); hipFree(d_matrixB); hipFree(d_matrixC); return 0; } void printHelp(char * programName) { std::cout << "Help:" << std::endl << " Usage: " << std::endl << " " << programName << " [-p] [-s <matrix_size>] [-t <threads_per_block>]" << std::endl << " [-g <blocks_per_grid] [-c] [--print-matrix]" << std::endl << "" << std::endl << " -p|--pinned-memory" << std::endl << " Use pinned Memory instead of pageable memory" << std::endl << "" << std::endl << " -s <matrix_size>|--size <matix_size>" << std::endl << " The width of the Matrix" << std::endl << "" << std::endl << " -t <threads_per_block>|--threads-per-block <threads_per_block>" << std::endl << " The number of threads per block" << std::endl << "" << std::endl << " -c|--no-checking" << std::endl << " Do not check the result of the matrix multiplication" << std::endl << "" << std::endl << " --print-matrix" << std::endl << " Print the output matrix (only recommended for small matrices)" << std::endl << std::endl; }
1a7c6d1bd704834fb3cf95b402161f716337c9f1.cu
/************************************************************************************************** * * Computer Engineering Group, Heidelberg University - GPU Computing Exercise 05 * * Group : TODO * * File : main.cu * * Purpose : Naive Matrix Multiplication * *************************************************************************************************/ #include <cmath> #include <iostream> #include <cstdlib> #include <ctime> #include <chCommandLine.h> #include <chTimer.hpp> #include "mmult_cpu.h" const static int DEFAULT_MATRIX_WIDTH = 1024; const static int DEFAULT_BLOCK_DIM = 32; // // Function Prototypes // void printHelp(char * /*programName*/); // // matMul_Kernel // __global__ void matMul_Kernel(int matrixSize, float* matrixA, float* matrixB, float* matrixC) { float fElementSum, fXelement, fYelement; int elementIdx = blockIdx.x * blockDim.x + threadIdx.x; int elementIdy = blockIdx.y * blockDim.y + threadIdx.y; int elementId = elementIdx * matrixSize + elementIdy; if (elementIdx < matrixSize && elementIdy < matrixSize) { fElementSum = 0.f; for(int i = 0; i < matrixSize; ++i) { fXelement = matrixA[elementIdx * matrixSize + i]; fYelement = matrixB[i * matrixSize + elementIdy]; fElementSum += fXelement * fYelement; } matrixC[elementId] = fElementSum; } } // // Shared matMul_Kernel // __global__ void shMatMul_Kernel(int matrixSize, float* matrixA, float* matrixB, float* matrixC) { extern __shared__ float sh_Mem[]; int tilewidth = blockDim.x; float *sh_MatrixA = &(sh_Mem[0]); float *sh_MatrixB = &(sh_Mem[1*tilewidth*tilewidth]); //float *sh_MatrixC= &(sh_Mem[2*tilewidth*tilewidth]); int elementIdx = blockIdx.x * blockDim.x + threadIdx.x; // Col int elementIdy = blockIdx.y * blockDim.y + threadIdx.y; // Row int elementId = elementIdy * matrixSize + elementIdx; float CValue = 0; if (elementIdx < matrixSize && elementIdy < matrixSize) { for(int m=0; m < (matrixSize/tilewidth); ++m) { sh_MatrixA[tilewidth*threadIdx.y + threadIdx.x] = matrixA[elementIdy*matrixSize + (m*tilewidth+threadIdx.x)]; sh_MatrixB[tilewidth*threadIdx.y + threadIdx.x] = matrixB[elementIdx + (m*tilewidth+threadIdx.y)*matrixSize]; __syncthreads(); for(int k=0; k<tilewidth; ++k) CValue += sh_MatrixA[tilewidth*threadIdx.y + k] * sh_MatrixB[tilewidth*k + threadIdx.x]; __syncthreads(); } matrixC[elementId] = CValue; } } // // Main // int main(int argc, char * argv[]) { // // Show Help // bool showHelp = chCommandLineGetBool("h", argc, argv); if (!showHelp) { showHelp = chCommandLineGetBool("help", argc, argv); } if (showHelp) { printHelp(argv[0]); exit(0); } std::cout << "***" << std::endl << "*** Starting ..." << std::endl << "***" << std::endl; ChTimer memCpyH2DTimer, memCpyD2HTimer; ChTimer kernelTimer, hostTimer; // // Allocate Memory // int matrixWidth = 0; chCommandLineGet<int>(&matrixWidth, "s", argc, argv); chCommandLineGet<int>(&matrixWidth, "size", argc, argv); matrixWidth = matrixWidth != 0 ? 
matrixWidth : DEFAULT_MATRIX_WIDTH; int matrixSize = matrixWidth * matrixWidth; // // Host Memory // bool pinnedMemory = chCommandLineGetBool("p", argc, argv); if (!pinnedMemory) { pinnedMemory = chCommandLineGetBool("pinned-memory",argc,argv); } float* h_matrixA = NULL; float* h_matrixB = NULL; float* h_matrixC = NULL; if (!pinnedMemory) { // Pageable h_matrixA = static_cast<float*>(malloc( static_cast<size_t>(matrixSize * sizeof(*h_matrixA)))); h_matrixB = static_cast<float*>(malloc( static_cast<size_t>(matrixSize * sizeof(*h_matrixB)))); h_matrixC = static_cast<float*>(calloc( static_cast<size_t>(matrixSize), sizeof *h_matrixC)); } else { // Pinned cudaMallocHost(&h_matrixA, static_cast<size_t>(matrixSize * sizeof(*h_matrixA))); cudaMallocHost(&h_matrixB, static_cast<size_t>(matrixSize * sizeof(*h_matrixB))); cudaMallocHost(&h_matrixC, static_cast<size_t>(matrixSize * sizeof(*h_matrixC))); memset ( h_matrixC, 0, matrixSize * sizeof(*h_matrixC) ); } // // Device Memory // float* d_matrixA = NULL; float* d_matrixB = NULL; float* d_matrixC = NULL; cudaMalloc(&d_matrixA, static_cast<size_t>(matrixSize * sizeof(*d_matrixA))); cudaMalloc(&d_matrixB, static_cast<size_t>(matrixSize * sizeof(*d_matrixB))); cudaMalloc(&d_matrixC, static_cast<size_t>(matrixSize * sizeof(*d_matrixC))); // // Check Pointers // if (h_matrixA == NULL || h_matrixB == NULL || h_matrixC == NULL || d_matrixA == NULL || d_matrixB == NULL || d_matrixC == NULL ) { std::cout << "\033[31m***" << std::endl << "*** Error - Allocation of Memory failed!!!" << std::endl << "***\033[0m" << std::endl; exit(-1); } // // Init Matrices // for (int i = 0; i < matrixSize; i++) { int x = i % matrixWidth; int y = i / matrixWidth; h_matrixA[i] = static_cast<float>(x * y); h_matrixB[i] = static_cast<float>(x + y); } // // Copy Data to the Device // memCpyH2DTimer.start(); cudaMemcpy(d_matrixA, h_matrixA, static_cast<size_t>(matrixSize * sizeof(*d_matrixA)), cudaMemcpyHostToDevice); cudaMemcpy(d_matrixB, h_matrixB, static_cast<size_t>(matrixSize * sizeof(*d_matrixB)), cudaMemcpyHostToDevice); memCpyH2DTimer.stop(); // // Get Kernel Launch Parameters // int blockSize = 0, gridSize = 0; // Block Dimension / Threads per Block chCommandLineGet<int>(&blockSize,"t", argc, argv); chCommandLineGet<int>(&blockSize,"threads-per-block", argc, argv); blockSize = blockSize != 0 ? 
blockSize : DEFAULT_BLOCK_DIM; if (blockSize > 32) { std::cout << "\033[31m***" << std::endl << "*** Error - The number of threads per block is too big" << std::endl << "***\033[0m" << std::endl; exit(-1); } gridSize = ceil(static_cast<float>(matrixWidth) / static_cast<float>(blockSize)); dim3 grid_dim = dim3(gridSize, gridSize, 1); dim3 block_dim = dim3(blockSize, blockSize, 1); std::cout << "***" << std::endl << "*** Grid Dim: " << grid_dim.x << "x" << grid_dim.y << "x" << grid_dim.z << std::endl << "*** Block Dim: " << block_dim.x << "x" << block_dim.y << "x" << block_dim.z << std::endl << "***" << std::endl; int sharedMemSize = 2 * blockSize * blockSize * sizeof(float); kernelTimer.start(); // // Launch Kernel // FIX: option '-shared' calculates now the shared version, // devault: calcutates the device memory version // if (!chCommandLineGetBool("shared", argc, argv)) { //std::cout << "calculating naive version\n"; matMul_Kernel<<<grid_dim, block_dim>>>(matrixWidth, d_matrixA, d_matrixB, d_matrixC); } else { shMatMul_Kernel<<<grid_dim, block_dim, sharedMemSize>>>(matrixWidth, d_matrixA, d_matrixB, d_matrixC); } // // Synchronize // cudaDeviceSynchronize(); // // Check for Errors // cudaError_t cudaError = cudaGetLastError(); if ( cudaError != cudaSuccess ) { std::cout << "\033[31m***" << std::endl << "***ERROR*** " << cudaError << " - " << cudaGetErrorString(cudaError) << std::endl << "***\033[0m" << std::endl; return -1; } kernelTimer.stop(); // // Copy Back Data // memCpyD2HTimer.start(); cudaMemcpy(h_matrixC, d_matrixC, static_cast<size_t>(matrixSize * sizeof(*d_matrixC)), cudaMemcpyDeviceToHost); memCpyD2HTimer.stop(); // // Check Result // bool dontCheckResult = chCommandLineGetBool("c", argc, argv); if (!dontCheckResult) { dontCheckResult = chCommandLineGetBool("no-check", argc, argv); } if (!dontCheckResult) { float* h_matrixD = static_cast<float*>( calloc(static_cast<size_t>(matrixSize), sizeof(*h_matrixD))); hostTimer.start(); MatrixMulOnHostBlocked(h_matrixA, h_matrixB, h_matrixD, static_cast<long>(matrixWidth), 32); hostTimer.stop(); bool resultOk = MatrixCompare(h_matrixC, h_matrixD, static_cast<long>(matrixWidth)); if (!resultOk) { std::cout << "\033[31m***" << std::endl << "*** Error - The two matrices are different!!!" 
<< std::endl << "***\033[0m" << std::endl; exit(-1); } free(h_matrixD); } // // Print Meassurement Results // std::cout << "***" << std::endl << "*** Results:" << std::endl << "*** Matrix Size: " << matrixSize << std::endl << "*** Time to Copy to Device: " << 1e3 * memCpyH2DTimer.getTime() << " ms" << std::endl << "*** Copy Bandwidth: " << 1e-9 * memCpyH2DTimer.getBandwidth(2 * matrixSize * sizeof(*h_matrixA)) << " GB/s" << std::endl << "*** Time to Copy from Device: " << 1e3 * memCpyD2HTimer.getTime() << " ms" << std::endl << "*** Copy Bandwidth: " << 1e-9 * memCpyD2HTimer.getBandwidth(matrixSize * sizeof(*h_matrixA)) << " GB/s" << std::endl << "*** Time for Matrix Multiplication: " << 1e3 * kernelTimer.getTime() << " ms" << std::endl << "*** CPU Version : " << 1e3 * hostTimer.getTime() << " ms" << std::endl << "*** Overall Time (a+b+c): " << 1e3 * (memCpyH2DTimer.getTime() + memCpyD2HTimer.getTime() + kernelTimer.getTime()) << " ms " << std::endl << "*** Speed-Up compared to host (with movements) : " << hostTimer.getTime()/(memCpyH2DTimer.getTime() + memCpyD2HTimer.getTime() + kernelTimer.getTime()) << std::endl << "*** Speed-Up compared to host (without movements) : " << hostTimer.getTime()/kernelTimer.getTime() << std::endl << "***" << std::endl; if (chCommandLineGetBool("print-matrix", argc, argv) && matrixWidth <= 16) { printOutMatrix(h_matrixC, matrixWidth); } // Free Memory if (!pinnedMemory) { free(h_matrixA); free(h_matrixB); free(h_matrixC); } else { cudaFreeHost(h_matrixA); cudaFreeHost(h_matrixB); cudaFreeHost(h_matrixC); } cudaFree(d_matrixA); cudaFree(d_matrixB); cudaFree(d_matrixC); return 0; } void printHelp(char * programName) { std::cout << "Help:" << std::endl << " Usage: " << std::endl << " " << programName << " [-p] [-s <matrix_size>] [-t <threads_per_block>]" << std::endl << " [-g <blocks_per_grid] [-c] [--print-matrix]" << std::endl << "" << std::endl << " -p|--pinned-memory" << std::endl << " Use pinned Memory instead of pageable memory" << std::endl << "" << std::endl << " -s <matrix_size>|--size <matix_size>" << std::endl << " The width of the Matrix" << std::endl << "" << std::endl << " -t <threads_per_block>|--threads-per-block <threads_per_block>" << std::endl << " The number of threads per block" << std::endl << "" << std::endl << " -c|--no-checking" << std::endl << " Do not check the result of the matrix multiplication" << std::endl << "" << std::endl << " --print-matrix" << std::endl << " Print the output matrix (only recommended for small matrices)" << std::endl << std::endl; }
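The tiled shMatMul_Kernel above loops over matrixSize/tilewidth complete tiles and has no remainder handling, and threads outside the matrix skip the __syncthreads() calls, so the '--shared' path effectively assumes the matrix width is a multiple of the block size. The helper below is a hypothetical guard (not part of the original exercise code) that one could call in main() before the shared-memory launch and exit(-1) when it returns false; it reuses the file's own error-reporting style.

#include <iostream>

// Hypothetical precondition check for the '--shared' path: reject matrix widths
// that are not a multiple of the block size, since the tiled kernel would skip
// the partial tile and boundary blocks could diverge around __syncthreads().
bool sharedKernelSizeOk(int matrixWidth, int blockSize) {
    if (matrixWidth % blockSize != 0) {
        std::cout << "\033[31m***" << std::endl
                  << "*** Error - --shared requires the matrix width (" << matrixWidth
                  << ") to be a multiple of the block size (" << blockSize << ")" << std::endl
                  << "***\033[0m" << std::endl;
        return false;
    }
    return true;
}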
383e2b6638cc66d2481608acd32f30718341fc22.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>

#include "caffe/layers/bnll_layer.hpp"

namespace caffe {

const float kBNLL_THRESHOLD = 50.;

template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ?
        in[index] + log(1. + exp(-in[index])) :
        log(1. + exp(in[index]));
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    // Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
    Dtype expval = exp(min(in_data[index], Dtype(50.)));
    out_diff[index] = in_diff[index] * expval / (expval + 1.);
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, bottom_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);

}  // namespace caffe
383e2b6638cc66d2481608acd32f30718341fc22.cu
#include <algorithm>
#include <vector>

#include "caffe/layers/bnll_layer.hpp"

namespace caffe {

const float kBNLL_THRESHOLD = 50.;

template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ?
        in[index] + log(1. + exp(-in[index])) :
        log(1. + exp(in[index]));
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    // Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
    Dtype expval = exp(min(in_data[index], Dtype(50.)));
    out_diff[index] = in_diff[index] * expval / (expval + 1.);
  }
}

template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);

}  // namespace caffe
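Both kernels above rely on the numerically stable form of the BNLL (softplus) function: for positive inputs the forward pass evaluates x + log(1 + exp(-x)) instead of log(1 + exp(x)), and the backward pass clamps the exponent at 50. The host-only sketch below illustrates why the rearrangement matters; the input value 100.0f is an illustrative choice and not something taken from the layer.

#include <cmath>
#include <cstdio>

int main() {
  float x = 100.0f;  // illustrative input, large enough that expf overflows
  // expf(100) exceeds FLT_MAX, so the naive softplus evaluates to inf...
  float naive = std::log(1.0f + std::exp(x));
  // ...while the rearranged form used in BNLLForward stays finite (~100).
  float stable = x + std::log(1.0f + std::exp(-x));
  std::printf("naive = %g, stable = %g\n", naive, stable);
  return 0;
}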
7fd5eb7a4dc6aee1a18ca6e370e35d3bd3befd5a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void initAndUpdate( float *D_oldVal, float *D_currVal, int tpoints, int nsteps )
{
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    if ( j < tpoints ) {
        j += 1;
        /* Calculate initial values based on sine curve */
        /* Initialize old values array */
        float x = ( float )( j - 1 ) / ( tpoints - 1 );
        D_oldVal[j] = D_currVal[j] = sin ( 6.2831853f * x );

        int i;
        /* global endpoints */
        if ( ( j == 1 ) || ( j == tpoints ) ) {
            D_currVal[j] = 0.0;
        } else {
            /* Update values for each time step */
            for ( i = 1; i <= nsteps; i++ ) {
                /* Update old values with new values */
                float newVal = ( 2.0 * D_currVal[j] ) - D_oldVal[j] + ( 0.09f * ( -2.0 ) * D_currVal[j] );
                D_oldVal[j] = D_currVal[j];
                D_currVal[j] = newVal;
            }
        }
    }
}
7fd5eb7a4dc6aee1a18ca6e370e35d3bd3befd5a.cu
#include "includes.h"

__global__ void initAndUpdate( float *D_oldVal, float *D_currVal, int tpoints, int nsteps )
{
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    if ( j < tpoints ) {
        j += 1;
        /* Calculate initial values based on sine curve */
        /* Initialize old values array */
        float x = ( float )( j - 1 ) / ( tpoints - 1 );
        D_oldVal[j] = D_currVal[j] = sin ( 6.2831853f * x );

        int i;
        /* global endpoints */
        if ( ( j == 1 ) || ( j == tpoints ) ) {
            D_currVal[j] = 0.0;
        } else {
            /* Update values for each time step */
            for ( i = 1; i <= nsteps; i++ ) {
                /* Update old values with new values */
                float newVal = ( 2.0 * D_currVal[j] ) - D_oldVal[j] + ( 0.09f * ( -2.0 ) * D_currVal[j] );
                D_oldVal[j] = D_currVal[j];
                D_currVal[j] = newVal;
            }
        }
    }
}
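This file contains only the fused init-and-update kernel; the host driver lives elsewhere. Below is a minimal sketch of how the kernel might be launched, assuming it is appended to the .cu file above so the kernel definition is in scope; the grid/block shape and the tpoints/nsteps values are illustrative assumptions, not values from the original program.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const int tpoints = 1000;  // illustrative number of grid points (assumption)
    const int nsteps  = 100;   // illustrative number of time steps (assumption)

    // The kernel shifts j by +1, so indices 1..tpoints are written; allocate tpoints + 1 slots.
    float *d_oldVal = nullptr, *d_currVal = nullptr;
    cudaMalloc(&d_oldVal,  (tpoints + 1) * sizeof(float));
    cudaMalloc(&d_currVal, (tpoints + 1) * sizeof(float));

    const int block = 256;
    const int grid  = (tpoints + block - 1) / block;
    initAndUpdate<<<grid, block>>>(d_oldVal, d_currVal, tpoints, nsteps);
    cudaDeviceSynchronize();

    // Copy back a few samples of the final waveform for inspection.
    float sample[4];
    cudaMemcpy(sample, d_currVal + 1, sizeof(sample), cudaMemcpyDeviceToHost);
    for (int k = 0; k < 4; ++k) std::printf("%f\n", sample[k]);

    cudaFree(d_oldVal);
    cudaFree(d_currVal);
    return 0;
}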
2a416c0b770843bfbd6ca3f6b1d5480c09720be7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <algorithm> #include <cmath> // for ::ceil() #include <memory> #include <type_traits> #include <utility> #include <vector> namespace { /** * @brief Handles the "degenerate" case num_partitions >= num_rows. * * Specifically, * If num_partitions == nrows: * Then, offsets = [0..nrows-1] * gather_row_indices = rotate [0..nrows-1] right by start_partition positions; * * If num_partitions > nrows: * Then, let: * dbg = generate a directed bipartite graph with num_partitions nodes and nrows edges, * so that node j has an edge to node (j+start_partition) % num_partitions, for j = 0,...,nrows-1; * * transpose_dbg = transpose graph of dbg; (i.e., (i -> j) edge in dbg means (j -> i) edge in * transpose); * * (offsets, indices) = (row_offsets, col_indices) of transpose_dbg; * where (row_offsets, col_indices) are the CSR format of the graph; * * @param[in] input The input table to be round-robin partitioned * @param[in] num_partitions Number of partitions for the table * @param[in] start_partition Index of the 1st partition * @param[in] stream CUDA stream used for device memory operations and kernel launches. 
* @param[in] mr Device memory resource used to allocate the returned table's device memory * * @returns A std::pair consisting of a unique_ptr to the partitioned table and the partition * offsets for each partition within the table */ std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> degenerate_partitions( cudf::table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto nrows = input.num_rows(); // iterator for partition index rotated right by start_partition positions: auto rotated_iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [num_partitions, start_partition] __device__(auto index) { return (index + num_partitions - start_partition) % num_partitions; }); if (num_partitions == nrows) { rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream); thrust::sequence(rmm::exec_policy(stream), partition_offsets.begin(), partition_offsets.end()); auto uniq_tbl = cudf::detail::gather(input, rotated_iter_begin, rotated_iter_begin + nrows, // map cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); return std::make_pair(std::move(uniq_tbl), cudf::detail::make_std_vector_sync(partition_offsets, stream)); } else { //( num_partitions > nrows ) rmm::device_uvector<cudf::size_type> d_row_indices(nrows, stream); // copy rotated right partition indexes that // fall in the interval [0, nrows): //(this relies on a _stable_ copy_if()) thrust::copy_if(rmm::exec_policy(stream), rotated_iter_begin, rotated_iter_begin + num_partitions, d_row_indices.begin(), [nrows] __device__(auto index) { return (index < nrows); }); //...and then use the result, d_row_indices, as gather map: auto uniq_tbl = cudf::detail::gather(input, d_row_indices.begin(), d_row_indices.end(), // map cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); // offsets (part 1: compute partition sizes); // iterator for number of edges of the transposed bipartite graph; // this composes rotated_iter transform (above) iterator with // calculating number of edges of transposed bi-graph: auto nedges_iter_begin = thrust::make_transform_iterator( rotated_iter_begin, [nrows] __device__(auto index) { return (index < nrows ? 1 : 0); }); // offsets (part 2: compute partition offsets): rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream); thrust::exclusive_scan(rmm::exec_policy(stream), nedges_iter_begin, nedges_iter_begin + num_partitions, partition_offsets.begin()); return std::make_pair(std::move(uniq_tbl), cudf::detail::make_std_vector_sync(partition_offsets, stream)); } } } // namespace namespace cudf { namespace detail { std::pair<std::unique_ptr<table>, std::vector<cudf::size_type>> round_robin_partition( table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition = 0, rmm::cuda_stream_view stream = rmm::cuda_stream_default, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { auto nrows = input.num_rows(); CUDF_EXPECTS(num_partitions > 0, "Incorrect number of partitions. Must be greater than 0."); CUDF_EXPECTS(start_partition < num_partitions, "Incorrect start_partition index. Must be less than number of partitions."); CUDF_EXPECTS( start_partition >= 0, "Incorrect start_partition index. 
Must be positive."); // since cudf::size_type is an alias for // int32_t, it _can_ be negative // handle degenerate case: // if (num_partitions >= nrows) { return degenerate_partitions(input, num_partitions, start_partition, stream, mr); } auto np_max_size = nrows % num_partitions; //# partitions of max size // handle case when nr `mod` np == 0; // fix for bug: https://github.com/rapidsai/cudf/issues/4043 auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions); cudf::size_type max_partition_size = ::ceil( static_cast<double>(nrows) / static_cast<double>(num_partitions)); // max size of partitions auto total_max_partitions_size = num_partitions_max_size * max_partition_size; auto num_partitions_min_size = num_partitions - num_partitions_max_size; // delta is the number of positions to rotate right // the original range [0,1,...,n-1] // and is calculated by accumulating the first //`start_partition` partition sizes from the end; // i.e., // the partition sizes array (of size p) being: //[m,m,...,m,(m-1),...,(m-1)] //(with num_partitions_max_size sizes `m` at the beginning; // and (p-num_partitions_max_size) sizes `(m-1)` at the end) // we accumulate the 1st `start_partition` entries from the end: // auto delta = (start_partition > num_partitions_min_size ? num_partitions_min_size * (max_partition_size - 1) + (start_partition - num_partitions_min_size) * max_partition_size : start_partition * (max_partition_size - 1)); auto iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [nrows, num_partitions, max_partition_size, num_partitions_max_size, total_max_partitions_size, delta] __device__(auto index0) { // rotate original index right by delta positions; // this is the effect of applying start_partition: // auto rotated_index = (index0 + nrows - delta) % nrows; // using rotated_index = given index0, rotated; // the algorithm below calculates the src round-robin row, // by calculating the partition_index and the index_within_partition: // auto index_within_partition = (rotated_index <= total_max_partitions_size ? rotated_index % max_partition_size : (rotated_index - total_max_partitions_size) % (max_partition_size - 1)); auto partition_index = (rotated_index <= total_max_partitions_size ? rotated_index / max_partition_size : num_partitions_max_size + (rotated_index - total_max_partitions_size) / (max_partition_size - 1)); return num_partitions * index_within_partition + partition_index; }); auto uniq_tbl = cudf::detail::gather( input, iter_begin, iter_begin + nrows, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); auto ret_pair = std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions)); // this has the effect of rotating the set of partition sizes // right by start_partition positions: // auto rotated_iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [num_partitions, start_partition, max_partition_size, num_partitions_max_size](auto index) { return ((index + num_partitions - start_partition) % num_partitions < num_partitions_max_size ? max_partition_size : max_partition_size - 1); }); // then exclusive_scan on the resulting // rotated partition sizes to get the partition offsets // corresponding to start_partition: // Since: //"num_partitions is usually going to be relatively small //(<1,000), as such, it's probably more expensive to do this on the device. // Instead, do it on the host directly into the std::vector and avoid the memcpy." 
- JH // thrust::exclusive_scan( thrust::host, rotated_iter_begin, rotated_iter_begin + num_partitions, ret_pair.second.begin()); return ret_pair; } } // namespace detail std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition( table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition = 0, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { CUDF_FUNC_RANGE(); return cudf::detail::round_robin_partition( input, num_partitions, start_partition, rmm::cuda_stream_default, mr); } } // namespace cudf
2a416c0b770843bfbd6ca3f6b1d5480c09720be7.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <algorithm> #include <cmath> // for std::ceil() #include <memory> #include <type_traits> #include <utility> #include <vector> namespace { /** * @brief Handles the "degenerate" case num_partitions >= num_rows. * * Specifically, * If num_partitions == nrows: * Then, offsets = [0..nrows-1] * gather_row_indices = rotate [0..nrows-1] right by start_partition positions; * * If num_partitions > nrows: * Then, let: * dbg = generate a directed bipartite graph with num_partitions nodes and nrows edges, * so that node j has an edge to node (j+start_partition) % num_partitions, for j = 0,...,nrows-1; * * transpose_dbg = transpose graph of dbg; (i.e., (i -> j) edge in dbg means (j -> i) edge in * transpose); * * (offsets, indices) = (row_offsets, col_indices) of transpose_dbg; * where (row_offsets, col_indices) are the CSR format of the graph; * * @param[in] input The input table to be round-robin partitioned * @param[in] num_partitions Number of partitions for the table * @param[in] start_partition Index of the 1st partition * @param[in] stream CUDA stream used for device memory operations and kernel launches. 
* @param[in] mr Device memory resource used to allocate the returned table's device memory * * @returns A std::pair consisting of a unique_ptr to the partitioned table and the partition * offsets for each partition within the table */ std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> degenerate_partitions( cudf::table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto nrows = input.num_rows(); // iterator for partition index rotated right by start_partition positions: auto rotated_iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [num_partitions, start_partition] __device__(auto index) { return (index + num_partitions - start_partition) % num_partitions; }); if (num_partitions == nrows) { rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream); thrust::sequence(rmm::exec_policy(stream), partition_offsets.begin(), partition_offsets.end()); auto uniq_tbl = cudf::detail::gather(input, rotated_iter_begin, rotated_iter_begin + nrows, // map cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); return std::make_pair(std::move(uniq_tbl), cudf::detail::make_std_vector_sync(partition_offsets, stream)); } else { //( num_partitions > nrows ) rmm::device_uvector<cudf::size_type> d_row_indices(nrows, stream); // copy rotated right partition indexes that // fall in the interval [0, nrows): //(this relies on a _stable_ copy_if()) thrust::copy_if(rmm::exec_policy(stream), rotated_iter_begin, rotated_iter_begin + num_partitions, d_row_indices.begin(), [nrows] __device__(auto index) { return (index < nrows); }); //...and then use the result, d_row_indices, as gather map: auto uniq_tbl = cudf::detail::gather(input, d_row_indices.begin(), d_row_indices.end(), // map cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); // offsets (part 1: compute partition sizes); // iterator for number of edges of the transposed bipartite graph; // this composes rotated_iter transform (above) iterator with // calculating number of edges of transposed bi-graph: auto nedges_iter_begin = thrust::make_transform_iterator( rotated_iter_begin, [nrows] __device__(auto index) { return (index < nrows ? 1 : 0); }); // offsets (part 2: compute partition offsets): rmm::device_uvector<cudf::size_type> partition_offsets(num_partitions, stream); thrust::exclusive_scan(rmm::exec_policy(stream), nedges_iter_begin, nedges_iter_begin + num_partitions, partition_offsets.begin()); return std::make_pair(std::move(uniq_tbl), cudf::detail::make_std_vector_sync(partition_offsets, stream)); } } } // namespace namespace cudf { namespace detail { std::pair<std::unique_ptr<table>, std::vector<cudf::size_type>> round_robin_partition( table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition = 0, rmm::cuda_stream_view stream = rmm::cuda_stream_default, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { auto nrows = input.num_rows(); CUDF_EXPECTS(num_partitions > 0, "Incorrect number of partitions. Must be greater than 0."); CUDF_EXPECTS(start_partition < num_partitions, "Incorrect start_partition index. Must be less than number of partitions."); CUDF_EXPECTS( start_partition >= 0, "Incorrect start_partition index. 
Must be positive."); // since cudf::size_type is an alias for // int32_t, it _can_ be negative // handle degenerate case: // if (num_partitions >= nrows) { return degenerate_partitions(input, num_partitions, start_partition, stream, mr); } auto np_max_size = nrows % num_partitions; //# partitions of max size // handle case when nr `mod` np == 0; // fix for bug: https://github.com/rapidsai/cudf/issues/4043 auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions); cudf::size_type max_partition_size = std::ceil( static_cast<double>(nrows) / static_cast<double>(num_partitions)); // max size of partitions auto total_max_partitions_size = num_partitions_max_size * max_partition_size; auto num_partitions_min_size = num_partitions - num_partitions_max_size; // delta is the number of positions to rotate right // the original range [0,1,...,n-1] // and is calculated by accumulating the first //`start_partition` partition sizes from the end; // i.e., // the partition sizes array (of size p) being: //[m,m,...,m,(m-1),...,(m-1)] //(with num_partitions_max_size sizes `m` at the beginning; // and (p-num_partitions_max_size) sizes `(m-1)` at the end) // we accumulate the 1st `start_partition` entries from the end: // auto delta = (start_partition > num_partitions_min_size ? num_partitions_min_size * (max_partition_size - 1) + (start_partition - num_partitions_min_size) * max_partition_size : start_partition * (max_partition_size - 1)); auto iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [nrows, num_partitions, max_partition_size, num_partitions_max_size, total_max_partitions_size, delta] __device__(auto index0) { // rotate original index right by delta positions; // this is the effect of applying start_partition: // auto rotated_index = (index0 + nrows - delta) % nrows; // using rotated_index = given index0, rotated; // the algorithm below calculates the src round-robin row, // by calculating the partition_index and the index_within_partition: // auto index_within_partition = (rotated_index <= total_max_partitions_size ? rotated_index % max_partition_size : (rotated_index - total_max_partitions_size) % (max_partition_size - 1)); auto partition_index = (rotated_index <= total_max_partitions_size ? rotated_index / max_partition_size : num_partitions_max_size + (rotated_index - total_max_partitions_size) / (max_partition_size - 1)); return num_partitions * index_within_partition + partition_index; }); auto uniq_tbl = cudf::detail::gather( input, iter_begin, iter_begin + nrows, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr); auto ret_pair = std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions)); // this has the effect of rotating the set of partition sizes // right by start_partition positions: // auto rotated_iter_begin = thrust::make_transform_iterator( thrust::make_counting_iterator<cudf::size_type>(0), [num_partitions, start_partition, max_partition_size, num_partitions_max_size](auto index) { return ((index + num_partitions - start_partition) % num_partitions < num_partitions_max_size ? max_partition_size : max_partition_size - 1); }); // then exclusive_scan on the resulting // rotated partition sizes to get the partition offsets // corresponding to start_partition: // Since: //"num_partitions is usually going to be relatively small //(<1,000), as such, it's probably more expensive to do this on the device. // Instead, do it on the host directly into the std::vector and avoid the memcpy." 
- JH // thrust::exclusive_scan( thrust::host, rotated_iter_begin, rotated_iter_begin + num_partitions, ret_pair.second.begin()); return ret_pair; } } // namespace detail std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition( table_view const& input, cudf::size_type num_partitions, cudf::size_type start_partition = 0, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) { CUDF_FUNC_RANGE(); return cudf::detail::round_robin_partition( input, num_partitions, start_partition, rmm::cuda_stream_default, mr); } } // namespace cudf
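/*
 * A minimal host-side sketch of the partition-size / offset arithmetic the
 * comments above describe, run on deliberately tiny, made-up values
 * (nrows = 10, num_partitions = 3, start_partition = 1). It only mirrors the
 * reasoning about rotated sizes and their exclusive scan; it is not part of
 * the cudf API.
 */
#include <cstdio>
#include <vector>

int main() {
  int nrows = 10, num_partitions = 3, start_partition = 1;
  int np_max_size = nrows % num_partitions;                    // # partitions of max size
  int num_partitions_max_size =
      (np_max_size > 0 ? np_max_size : num_partitions);        // nr % np == 0 special case noted above
  int max_partition_size = (nrows + num_partitions - 1) / num_partitions;

  std::vector<int> sizes(num_partitions), offsets(num_partitions);
  for (int p = 0; p < num_partitions; ++p) {
    // rotate the size pattern right by start_partition positions
    int rotated = (p + num_partitions - start_partition) % num_partitions;
    sizes[p] = (rotated < num_partitions_max_size ? max_partition_size : max_partition_size - 1);
  }
  for (int p = 0, acc = 0; p < num_partitions; ++p) {          // exclusive scan of the rotated sizes
    offsets[p] = acc;
    acc += sizes[p];
  }
  for (int p = 0; p < num_partitions; ++p)
    std::printf("partition %d: size %d, offset %d\n", p, sizes[p], offsets[p]);
  return 0;
}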
bfbc3403c8b45d2f1f473680c6c465fb809cf24e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BranchLayer.h" #include <vector> #include <helper_functions.h> #include <helper_cuda.h> #include <math.h> #include "../common/Config.h" #include "../common/cuBase.h" /* * dim3 block = dim3(batch); * dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_backpropagation( float** curDelta, float* preDelta, int curDeltaSize, int len); /* *dim3 block = dim3(outputs.size(), batch); *dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_feedforward( float* inputs, float** outputs, int len); void BranchLayer::feedforward() { /*copy the input to outputs*/ dim3 block = dim3(outputs.size(), batch); dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); hipLaunchKernelGGL(( g_BranchLayer_feedforward), dim3(block), dim3(thread), 0, 0, inputs->getDev(), outputs.m_devPoint, outputs[0]->getLen()); checkCudaErrors(hipDeviceSynchronize()); getLastCudaError("BranchLayer feedforward"); } void BranchLayer::backpropagation() { if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data")) return; dim3 block = dim3(batch); dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); preDelta->gpuClear(); hipLaunchKernelGGL(( g_BranchLayer_backpropagation), dim3(block), dim3(thread), 0, 0, curDelta.m_devPoint, preDelta->getDev(), curDelta.size(), preDelta->getLen()); checkCudaErrors(hipDeviceSynchronize()); getLastCudaError("BranchLayer backpropagation"); } BranchLayer::BranchLayer(std::string name) { cost = NULL; m_name = name; ConfigBranchLayer* config = (ConfigBranchLayer*)Config::instance()->getLayerByName(m_name); ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } inputDim = preLayer->outputDim; outputDim = inputDim; outputAmount = preLayer->outputAmount; inputAmount = outputAmount; batch = Config::instance()->getBatchSize(); for(int i = 0; i < (int)config->m_outputs.size(); i++){ outputs.push_back (new cuMatrix<float>(batch, outputDim * outputDim, outputAmount)); curDelta.push_back(new cuMatrix<float>(batch, outputDim * outputDim, outputAmount)); mapId[config->m_outputs[i]] = i; } outputs.toGpu(); curDelta.toGpu(); Layers::instance()->set(m_name, this); } /* *dim3 block = dim3(outputs.size(), batch); *dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_feedforward( float* inputs, float** outputs, int len) { int branchId = blockIdx.x; float* output = outputs[branchId]; for(int i = 0; i < len; i += gridDim.y * blockDim.x){ int idx = i + threadIdx.x + blockIdx.y * blockDim.x; if(idx < len){ output[idx] = inputs[idx]; } } } /* * dim3 block = dim3(batch); * dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_backpropagation( float** curDelta, float* preDelta, int curDeltaSize, int len) { for(int i = 0; i < len ; i += gridDim.x * blockDim.x){ int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len){ float val = 0.0; for(int c = 0; c < curDeltaSize; c++){ val += 
curDelta[c][idx]; } preDelta[idx] = val; } } }
bfbc3403c8b45d2f1f473680c6c465fb809cf24e.cu
#include "BranchLayer.h" #include <vector> #include <helper_functions.h> #include <helper_cuda.h> #include <math.h> #include "../common/Config.h" #include "../common/cuBase.h" /* * dim3 block = dim3(batch); * dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_backpropagation( float** curDelta, float* preDelta, int curDeltaSize, int len); /* *dim3 block = dim3(outputs.size(), batch); *dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_feedforward( float* inputs, float** outputs, int len); void BranchLayer::feedforward() { /*copy the input to outputs*/ dim3 block = dim3(outputs.size(), batch); dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); g_BranchLayer_feedforward<<<block, thread>>>( inputs->getDev(), outputs.m_devPoint, outputs[0]->getLen()); checkCudaErrors(cudaDeviceSynchronize()); getLastCudaError("BranchLayer feedforward"); } void BranchLayer::backpropagation() { if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data")) return; dim3 block = dim3(batch); dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); preDelta->gpuClear(); g_BranchLayer_backpropagation<<<block, thread>>>( curDelta.m_devPoint, preDelta->getDev(), curDelta.size(), preDelta->getLen()); checkCudaErrors(cudaDeviceSynchronize()); getLastCudaError("BranchLayer backpropagation"); } BranchLayer::BranchLayer(std::string name) { cost = NULL; m_name = name; ConfigBranchLayer* config = (ConfigBranchLayer*)Config::instance()->getLayerByName(m_name); ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } inputDim = preLayer->outputDim; outputDim = inputDim; outputAmount = preLayer->outputAmount; inputAmount = outputAmount; batch = Config::instance()->getBatchSize(); for(int i = 0; i < (int)config->m_outputs.size(); i++){ outputs.push_back (new cuMatrix<float>(batch, outputDim * outputDim, outputAmount)); curDelta.push_back(new cuMatrix<float>(batch, outputDim * outputDim, outputAmount)); mapId[config->m_outputs[i]] = i; } outputs.toGpu(); curDelta.toGpu(); Layers::instance()->set(m_name, this); } /* *dim3 block = dim3(outputs.size(), batch); *dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_feedforward( float* inputs, float** outputs, int len) { int branchId = blockIdx.x; float* output = outputs[branchId]; for(int i = 0; i < len; i += gridDim.y * blockDim.x){ int idx = i + threadIdx.x + blockIdx.y * blockDim.x; if(idx < len){ output[idx] = inputs[idx]; } } } /* * dim3 block = dim3(batch); * dim3 thread= dim3(min(outputs[0]->getLen() / batch, 1024)); */ __global__ void g_BranchLayer_backpropagation( float** curDelta, float* preDelta, int curDeltaSize, int len) { for(int i = 0; i < len ; i += gridDim.x * blockDim.x){ int idx = i + blockDim.x * blockIdx.x + threadIdx.x; if(idx < len){ float val = 0.0; for(int c = 0; c < curDeltaSize; c++){ val += curDelta[c][idx]; } preDelta[idx] = val; } } }
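/*
 * A small CPU reference for the branch semantics implemented above (an
 * illustration with made-up sizes, not part of the original project):
 * feedforward fans the input out to every branch unchanged, and
 * backpropagation accumulates the per-branch deltas into preDelta.
 */
#include <cstdio>

int main() {
  const int len = 4, branches = 3;
  float input[len] = {1, 2, 3, 4};
  float curDelta[branches][len] = {{0.1f, 0.1f, 0.1f, 0.1f},
                                   {0.2f, 0.2f, 0.2f, 0.2f},
                                   {0.3f, 0.3f, 0.3f, 0.3f}};
  float outputs[branches][len];
  float preDelta[len] = {0};

  for (int b = 0; b < branches; ++b)            // feedforward: plain copy per branch
    for (int i = 0; i < len; ++i) outputs[b][i] = input[i];

  for (int i = 0; i < len; ++i)                 // backpropagation: sum of branch deltas
    for (int b = 0; b < branches; ++b) preDelta[i] += curDelta[b][i];

  std::printf("outputs[2][0] = %.1f, preDelta[0] = %.2f (expect 1.0 and 0.60)\n",
              outputs[2][0], preDelta[0]);
  return 0;
}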
9fa30af1eae8fb3e991c38152b998d74fa0d3dba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "timerc.h" __global__ void warmup(){ } __global__ void finishCumSum(int *input, int sizeI, int* output, int sizeO){ int numElementsPerBlock = sizeI/sizeO; int* s_input = input + numElementsPerBlock*(blockIdx.x + 1) ; s_input[threadIdx.x] += output[blockIdx.x]; s_input[threadIdx.x + numElementsPerBlock/2 ] += output[blockIdx.x]; } __global__ void vectorCumSum(int *input, int size, int* output, int WO){ int numElementsPerBlock = size/gridDim.x; int* s_input = input + numElementsPerBlock*blockIdx.x; for(int s = 1; s<=numElementsPerBlock/2; s=s*2){ if( threadIdx.x < numElementsPerBlock/(2*s)){ s_input[threadIdx.x*2*s + s-1+s] = s_input[threadIdx.x*2*s + s-1] + s_input[threadIdx.x*2*s+s-1 + s]; } __syncthreads(); } for(int s = numElementsPerBlock/4; s >= 1; s=s/2){ if( threadIdx.x < -1 + numElementsPerBlock/(2*s)){ s_input[threadIdx.x*2*s + 2*s-1+s] = s_input[threadIdx.x*2*s + 2*s-1] + s_input[threadIdx.x*2*s + 2*s-1 + s]; } __syncthreads(); } if (WO == 1) output[blockIdx.x] = s_input[numElementsPerBlock - 1]; } __global__ void vectorSumBetterCoalescedWithPresum(int *input,int c_size, int size, int *output){ int numElementsPerBlock = size/(gridDim.x*c_size); int total1 = 0; int total2 = 0; for(int i = 0; i < c_size;i++){ total1 = total1 + input[i*2*blockDim.x + threadIdx.x + blockIdx.x*numElementsPerBlock*c_size]; total2 = total2 + input[i*2*blockDim.x + blockDim.x + threadIdx.x + blockIdx.x*numElementsPerBlock*c_size]; } __shared__ int s_input[2048]; s_input[threadIdx.x] = total1; s_input[threadIdx.x + (numElementsPerBlock/2) ] = total2; __syncthreads(); for(int s = numElementsPerBlock/2; s>=1; s=s/2){ if( threadIdx.x < s){ s_input[threadIdx.x] = s_input[threadIdx.x] + s_input[threadIdx.x+s]; } __syncthreads(); } output[blockIdx.x] = s_input[0]; } __global__ void vectorSumBetterCoalesced(int *input, int size, int *output){ int numElementsPerBlock = size/gridDim.x; __shared__ int s_input[2048]; s_input[threadIdx.x] = input[threadIdx.x + blockIdx.x*numElementsPerBlock]; s_input[threadIdx.x + (numElementsPerBlock/2) ] = input[threadIdx.x + (numElementsPerBlock/2) + blockIdx.x]; __syncthreads(); for(int s = numElementsPerBlock/2; s>=1; s=s/2){ if( threadIdx.x < s){ s_input[threadIdx.x] = s_input[threadIdx.x] + s_input[threadIdx.x+s]; } __syncthreads(); } output[blockIdx.x] = s_input[0]; } __global__ void vector_sum_naive(int *input, int size, int *output){ int abs_thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int numElementsPerThread = size / ( gridDim.x * blockDim.x); int startPos = abs_thread_idx * numElementsPerThread; int localTotal = 0; for(int i = 0; i < numElementsPerThread; i++){ localTotal = localTotal + input[i + startPos]; } output[abs_thread_idx] = localTotal; } __global__ void vector_sum(int *input, int size, int *output){ int abs_thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int numElementsPerThread = size / (blockDim.x * gridDim.x); int startPos = abs_thread_idx * numElementsPerThread; int localTotal = 0; for(int i = 0; i < numElementsPerThread; i++){ localTotal = localTotal + input[i + startPos]; } __shared__ int totals[2048]; totals[abs_thread_idx] = localTotal; __syncthreads(); if(abs_thread_idx == 0){ for(int i = 1; i < blockDim.x * gridDim.x; i++){ localTotal = localTotal + totals[i]; } output[0] = localTotal; } } int main(){ int numElements = 128*1024 * 1024; int *hostInput = (int *) malloc(numElements * sizeof(int)); for(int i = 0; i < numElements; 
i++){ hostInput[i] = 1; } int *deviceInput; int *deviceOutput; int hostOutput[1024*1024]; int cpu_total = 0; float cpu_time; cstart(); for (int i = 0; i < 128*1024*1024; i++){ cpu_total = cpu_total + 1; } cend(&cpu_time); printf("Cpu total = %d\n", cpu_total); printf("Cpu time = %f\n", cpu_time); hipLaunchKernelGGL(( warmup), dim3(1),dim3(1), 0, 0, ); float malloc_and_cpy_time; gstart(); hipMalloc((void **) &deviceInput, numElements * sizeof(int)); hipMalloc((void **) &deviceOutput, 128*1024 * 1024 * sizeof(int)); hipMemcpy(deviceInput, hostInput, numElements* sizeof(int),hipMemcpyHostToDevice); gend(&malloc_and_cpy_time); float CumSumKernelCallTime; gstart(); hipLaunchKernelGGL(( vectorCumSum), dim3(512*128) , dim3(1024), 0, 0, deviceInput, 128*1024*1024, deviceOutput, 1); //vectorCumSum<<<1,256*128>>>(deviceOutput, 512*128, NULL, 0);//deviceInput is not needed here hipMemcpy(hostOutput, deviceOutput, 128*512*sizeof(int),hipMemcpyDeviceToHost); int tmpCumSum = 0; for (int i = 0; i < 128*512; i++){ tmpCumSum += hostOutput[i]; hostOutput[i] = tmpCumSum; } hipMemcpy( deviceOutput, hostOutput, 128*512*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( finishCumSum), dim3(512*128 - 1) , dim3(1024), 0, 0, deviceInput, 128*1024*1024, deviceOutput, 512*128); gend(&CumSumKernelCallTime); float CumSumHostTime; cstart(); int hostCumSum = 0; for (int i = 0; i < 128*1024*1024; i++){ hostCumSum += hostInput[i]; hostInput[i] = hostCumSum; } cend(&CumSumHostTime); hipMemcpy(hostInput, deviceInput, 128*1024*1024*sizeof(int),hipMemcpyDeviceToHost); printf("Time Cum Sum CPU time = %f\n", CumSumHostTime); printf("Time Cum Sum Kernel Call time = %f\n", CumSumKernelCallTime); return 0; float naive_gpu_time; gstart(); hipLaunchKernelGGL(( vector_sum_naive), dim3(64*4),dim3(64/2), 0, 0, deviceInput, 128*1024*1024, deviceOutput); gend(&naive_gpu_time); printf("Naive kernel time = %f\n",naive_gpu_time); hipMemcpy(hostOutput, deviceOutput, 64*64*2 * sizeof(int),hipMemcpyDeviceToHost); int naive_total = 0; for (int i = 0; i < 64*64*2; i++){ naive_total = naive_total + hostOutput[i]; } printf("Total = %d\n", naive_total); float better_gpu_time; gstart(); hipLaunchKernelGGL(( vectorSumBetterCoalescedWithPresum), dim3(256), dim3(1024), 0, 0, deviceInput,256, 128*1024*1024, deviceOutput); gend(&better_gpu_time); printf("Better kernel time with presum = %f\n",better_gpu_time); float copy_back_and_finish_time; gstart(); hipMemcpy(hostOutput, deviceOutput, 256 * sizeof(int),hipMemcpyDeviceToHost); int better_total = 0; for (int i = 0; i < 256; i++){ better_total = better_total + hostOutput[i]; } gend(&copy_back_and_finish_time); printf("Total = %d\n", better_total); printf("Time to malloc and copy = %f, time to cpy back and finish = %f\n",malloc_and_cpy_time,copy_back_and_finish_time); hipDeviceSynchronize(); }
9fa30af1eae8fb3e991c38152b998d74fa0d3dba.cu
#include <stdio.h> #include "timerc.h" __global__ void warmup(){ } __global__ void finishCumSum(int *input, int sizeI, int* output, int sizeO){ int numElementsPerBlock = sizeI/sizeO; int* s_input = input + numElementsPerBlock*(blockIdx.x + 1) ; s_input[threadIdx.x] += output[blockIdx.x]; s_input[threadIdx.x + numElementsPerBlock/2 ] += output[blockIdx.x]; } __global__ void vectorCumSum(int *input, int size, int* output, int WO){ int numElementsPerBlock = size/gridDim.x; int* s_input = input + numElementsPerBlock*blockIdx.x; for(int s = 1; s<=numElementsPerBlock/2; s=s*2){ if( threadIdx.x < numElementsPerBlock/(2*s)){ s_input[threadIdx.x*2*s + s-1+s] = s_input[threadIdx.x*2*s + s-1] + s_input[threadIdx.x*2*s+s-1 + s]; } __syncthreads(); } for(int s = numElementsPerBlock/4; s >= 1; s=s/2){ if( threadIdx.x < -1 + numElementsPerBlock/(2*s)){ s_input[threadIdx.x*2*s + 2*s-1+s] = s_input[threadIdx.x*2*s + 2*s-1] + s_input[threadIdx.x*2*s + 2*s-1 + s]; } __syncthreads(); } if (WO == 1) output[blockIdx.x] = s_input[numElementsPerBlock - 1]; } __global__ void vectorSumBetterCoalescedWithPresum(int *input,int c_size, int size, int *output){ int numElementsPerBlock = size/(gridDim.x*c_size); int total1 = 0; int total2 = 0; for(int i = 0; i < c_size;i++){ total1 = total1 + input[i*2*blockDim.x + threadIdx.x + blockIdx.x*numElementsPerBlock*c_size]; total2 = total2 + input[i*2*blockDim.x + blockDim.x + threadIdx.x + blockIdx.x*numElementsPerBlock*c_size]; } __shared__ int s_input[2048]; s_input[threadIdx.x] = total1; s_input[threadIdx.x + (numElementsPerBlock/2) ] = total2; __syncthreads(); for(int s = numElementsPerBlock/2; s>=1; s=s/2){ if( threadIdx.x < s){ s_input[threadIdx.x] = s_input[threadIdx.x] + s_input[threadIdx.x+s]; } __syncthreads(); } output[blockIdx.x] = s_input[0]; } __global__ void vectorSumBetterCoalesced(int *input, int size, int *output){ int numElementsPerBlock = size/gridDim.x; __shared__ int s_input[2048]; s_input[threadIdx.x] = input[threadIdx.x + blockIdx.x*numElementsPerBlock]; s_input[threadIdx.x + (numElementsPerBlock/2) ] = input[threadIdx.x + (numElementsPerBlock/2) + blockIdx.x]; __syncthreads(); for(int s = numElementsPerBlock/2; s>=1; s=s/2){ if( threadIdx.x < s){ s_input[threadIdx.x] = s_input[threadIdx.x] + s_input[threadIdx.x+s]; } __syncthreads(); } output[blockIdx.x] = s_input[0]; } __global__ void vector_sum_naive(int *input, int size, int *output){ int abs_thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int numElementsPerThread = size / ( gridDim.x * blockDim.x); int startPos = abs_thread_idx * numElementsPerThread; int localTotal = 0; for(int i = 0; i < numElementsPerThread; i++){ localTotal = localTotal + input[i + startPos]; } output[abs_thread_idx] = localTotal; } __global__ void vector_sum(int *input, int size, int *output){ int abs_thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int numElementsPerThread = size / (blockDim.x * gridDim.x); int startPos = abs_thread_idx * numElementsPerThread; int localTotal = 0; for(int i = 0; i < numElementsPerThread; i++){ localTotal = localTotal + input[i + startPos]; } __shared__ int totals[2048]; totals[abs_thread_idx] = localTotal; __syncthreads(); if(abs_thread_idx == 0){ for(int i = 1; i < blockDim.x * gridDim.x; i++){ localTotal = localTotal + totals[i]; } output[0] = localTotal; } } int main(){ int numElements = 128*1024 * 1024; int *hostInput = (int *) malloc(numElements * sizeof(int)); for(int i = 0; i < numElements; i++){ hostInput[i] = 1; } int *deviceInput; int *deviceOutput; int hostOutput[1024*1024]; 
int cpu_total = 0; float cpu_time; cstart(); for (int i = 0; i < 128*1024*1024; i++){ cpu_total = cpu_total + 1; } cend(&cpu_time); printf("Cpu total = %d\n", cpu_total); printf("Cpu time = %f\n", cpu_time); warmup<<<1,1>>>(); float malloc_and_cpy_time; gstart(); cudaMalloc((void **) &deviceInput, numElements * sizeof(int)); cudaMalloc((void **) &deviceOutput, 128*1024 * 1024 * sizeof(int)); cudaMemcpy(deviceInput, hostInput, numElements* sizeof(int),cudaMemcpyHostToDevice); gend(&malloc_and_cpy_time); float CumSumKernelCallTime; gstart(); vectorCumSum<<<512*128 , 1024>>>(deviceInput, 128*1024*1024, deviceOutput, 1); //vectorCumSum<<<1,256*128>>>(deviceOutput, 512*128, NULL, 0);//deviceInput is not needed here cudaMemcpy(hostOutput, deviceOutput, 128*512*sizeof(int),cudaMemcpyDeviceToHost); int tmpCumSum = 0; for (int i = 0; i < 128*512; i++){ tmpCumSum += hostOutput[i]; hostOutput[i] = tmpCumSum; } cudaMemcpy( deviceOutput, hostOutput, 128*512*sizeof(int),cudaMemcpyHostToDevice); finishCumSum<<<512*128 - 1 , 1024>>>(deviceInput, 128*1024*1024, deviceOutput, 512*128); gend(&CumSumKernelCallTime); float CumSumHostTime; cstart(); int hostCumSum = 0; for (int i = 0; i < 128*1024*1024; i++){ hostCumSum += hostInput[i]; hostInput[i] = hostCumSum; } cend(&CumSumHostTime); cudaMemcpy(hostInput, deviceInput, 128*1024*1024*sizeof(int),cudaMemcpyDeviceToHost); printf("Time Cum Sum CPU time = %f\n", CumSumHostTime); printf("Time Cum Sum Kernel Call time = %f\n", CumSumKernelCallTime); return 0; float naive_gpu_time; gstart(); vector_sum_naive<<<64*4,64/2>>>(deviceInput, 128*1024*1024, deviceOutput); gend(&naive_gpu_time); printf("Naive kernel time = %f\n",naive_gpu_time); cudaMemcpy(hostOutput, deviceOutput, 64*64*2 * sizeof(int),cudaMemcpyDeviceToHost); int naive_total = 0; for (int i = 0; i < 64*64*2; i++){ naive_total = naive_total + hostOutput[i]; } printf("Total = %d\n", naive_total); float better_gpu_time; gstart(); vectorSumBetterCoalescedWithPresum<<<256, 1024>>>(deviceInput,256, 128*1024*1024, deviceOutput); gend(&better_gpu_time); printf("Better kernel time with presum = %f\n",better_gpu_time); float copy_back_and_finish_time; gstart(); cudaMemcpy(hostOutput, deviceOutput, 256 * sizeof(int),cudaMemcpyDeviceToHost); int better_total = 0; for (int i = 0; i < 256; i++){ better_total = better_total + hostOutput[i]; } gend(&copy_back_and_finish_time); printf("Total = %d\n", better_total); printf("Time to malloc and copy = %f, time to cpy back and finish = %f\n",malloc_and_cpy_time,copy_back_and_finish_time); cudaDeviceSynchronize(); }
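/*
 * A compact CPU model of the three-phase cumulative sum used above: an
 * in-block scan (vectorCumSum), a host-side scan of the per-block totals,
 * and finishCumSum adding each block's running prefix to the blocks that
 * follow it. Sizes here are deliberately tiny and illustrative only.
 */
#include <cstdio>
#include <vector>

int main() {
  const int nblocks = 4, per_block = 8, n = nblocks * per_block;
  std::vector<int> data(n, 1), block_totals(nblocks);

  // phase 1: inclusive scan inside each block, recording the block total
  for (int b = 0; b < nblocks; ++b) {
    int acc = 0;
    for (int i = 0; i < per_block; ++i) {
      acc += data[b * per_block + i];
      data[b * per_block + i] = acc;
    }
    block_totals[b] = acc;
  }
  // phase 2: scan of the block totals (done on the host in the code above)
  for (int b = 1; b < nblocks; ++b) block_totals[b] += block_totals[b - 1];

  // phase 3: add the previous blocks' running total to every later block
  for (int b = 1; b < nblocks; ++b)
    for (int i = 0; i < per_block; ++i) data[b * per_block + i] += block_totals[b - 1];

  std::printf("last element = %d (expect %d)\n", data[n - 1], n);
  return 0;
}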
13c4426916be3ff870ec8a99ba7ec02cf7fd204e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <string.h> #include <stdio.h> struct DataElement { char *name; int value; }; __global__ void Kernel(DataElement *elem) { printf("On device: name=%s, value=%d\n", elem->name, elem->value); elem->name[0] = 'd'; elem->value++; } void launch(DataElement *elem) { hipLaunchKernelGGL(( Kernel), dim3(1), dim3(1) , 0, 0, elem); hipDeviceSynchronize(); } int main(void) { DataElement *e; hipMallocManaged((void**)&e, sizeof(DataElement)); e->value = 10; hipMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) ); strcpy(e->name, "hello"); launch(e); printf("On host: name=%s, value=%d\n", e->name, e->value); hipFree(e->name); hipFree(e); hipDeviceReset(); }
13c4426916be3ff870ec8a99ba7ec02cf7fd204e.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <string.h> #include <stdio.h> struct DataElement { char *name; int value; }; __global__ void Kernel(DataElement *elem) { printf("On device: name=%s, value=%d\n", elem->name, elem->value); elem->name[0] = 'd'; elem->value++; } void launch(DataElement *elem) { Kernel<<< 1, 1 >>>(elem); cudaDeviceSynchronize(); } int main(void) { DataElement *e; cudaMallocManaged((void**)&e, sizeof(DataElement)); e->value = 10; cudaMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) ); strcpy(e->name, "hello"); launch(e); printf("On host: name=%s, value=%d\n", e->name, e->value); cudaFree(e->name); cudaFree(e); cudaDeviceReset(); }
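/*
 * For contrast with the managed-memory sample above, a sketch of the
 * explicit deep-copy pattern that cudaMallocManaged removes: the embedded
 * name pointer has to be allocated, copied and read back by hand. This is
 * an illustration only, not an NVIDIA sample.
 */
#include <cstdio>
#include <cuda_runtime.h>

struct DataElement { char *name; int value; };

__global__ void Kernel(DataElement *elem) {
  printf("On device: name=%s, value=%d\n", elem->name, elem->value);
  elem->name[0] = 'd';
  elem->value++;
}

int main(void) {
  const char host_str[] = "hello";

  // device copy of the string, then a host-side struct that points at it
  char *d_name;
  cudaMalloc((void**)&d_name, sizeof(host_str));
  cudaMemcpy(d_name, host_str, sizeof(host_str), cudaMemcpyHostToDevice);
  DataElement h_elem = {d_name, 10};

  // device copy of the struct itself
  DataElement *d_elem;
  cudaMalloc((void**)&d_elem, sizeof(DataElement));
  cudaMemcpy(d_elem, &h_elem, sizeof(DataElement), cudaMemcpyHostToDevice);

  Kernel<<<1, 1>>>(d_elem);
  cudaDeviceSynchronize();

  // read back the struct first, then the string it points to
  cudaMemcpy(&h_elem, d_elem, sizeof(DataElement), cudaMemcpyDeviceToHost);
  char result[sizeof(host_str)];
  cudaMemcpy(result, h_elem.name, sizeof(host_str), cudaMemcpyDeviceToHost);
  printf("On host: name=%s, value=%d\n", result, h_elem.value);

  cudaFree(d_name);
  cudaFree(d_elem);
  return 0;
}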
870aae9b989feacda166b3238b1860bfc535fd68.hip
// !!! This is a file automatically generated by hipify!!!
//Ran@2018/3/30
#include <exception>
#include <iostream>
#include <fstream>
#include <string>
#include <random>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "ran_timer.h"
#include "ran_helper_functions.h"

//kernel function
__global__ void vectorAdd(float *a, float *b, float *c, int num)
{
	//Notes:
	//blockDim: number of threads per block
	//blockIdx: index of the current block
	//threadIdx: index of the current thread within its block
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= num) return;
	c[i] = a[i] + b[i];
}

int main()
{
	//amount of data
	constexpr long num = 500000;
	constexpr long size = num * sizeof(float);
	//timer
	Timer t;
	displayInfo();
	try {
		//in host memory
		float *ra=new float[num];
		float *rb=new float[num];
		float *rc=new float[num];
		//in device memory
		float *ga = NULL;
		float *gb = NULL;
		float *gc = NULL;
		//generate random data
		for (int i = 0; i != num; ++i) {
			ra[i] = rand();
			rb[i] = rand();
		}
		t.begin();
		//allocate in device memory
		errProc(hipMalloc(&ga, size), "Failed to allocate A");
		errProc(hipMalloc(&gb, size), "Failed to allocate B");
		errProc(hipMalloc(&gc, size), "Failed to allocate C");
		//copy to device memory
		errProc(hipMemcpy(ga, ra, size, hipMemcpyHostToDevice), "Failed to copy A");
		errProc(hipMemcpy(gb, rb, size, hipMemcpyHostToDevice), "Failed to copy B");
		//launch
		//threads per block
		const int threads = 1024;
		//number of blocks
		const int block = (num + threads - 1) / threads;
		//launch with the required number of blocks and threads per block
		vectorAdd << <block, threads>> > (ga, gb, gc, num);
		errProc(hipGetLastError(), "Failed to launch");
		//fetch the result
		errProc(hipMemcpy(rc, gc, size, hipMemcpyDeviceToHost), "Failed to read back the result");
		//free device memory
		hipFree(ga), hipFree(gb);
		hipFree(gc);
		t.end();
		std::cout << "GPU computation finished, time: " << t.time() << std::endl;
		t.reset();
		t.begin();
		for (int i = 0; i < num; ++i) {
			rc[i] = ra[i] + rb[i];
		}
		t.end();
		std::cout << "CPU computation finished, time: " << t.time() << std::endl;
		delete[] ra;
		delete[] rb;
		delete[] rc;
	}
	catch (std::exception e) {
		std::cerr << e.what() << std::endl;
	}
	std::cout << "Press Enter to exit" << std::endl;
	getchar();
	return 0;
}
870aae9b989feacda166b3238b1860bfc535fd68.cu
//Ran@2018/3/30
#include <exception>
#include <iostream>
#include <fstream>
#include <string>
#include <random>
#include <cstdio>
#include <cuda_runtime.h>
#include "ran_timer.h"
#include "ran_helper_functions.h"

//kernel function
__global__ void vectorAdd(float *a, float *b, float *c, int num)
{
	//Notes:
	//blockDim: number of threads per block
	//blockIdx: index of the current block
	//threadIdx: index of the current thread within its block
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= num) return;
	c[i] = a[i] + b[i];
}

int main()
{
	//amount of data
	constexpr long num = 500000;
	constexpr long size = num * sizeof(float);
	//timer
	Timer t;
	displayInfo();
	try {
		//in host memory
		float *ra=new float[num];
		float *rb=new float[num];
		float *rc=new float[num];
		//in device (GPU) memory
		float *ga = NULL;
		float *gb = NULL;
		float *gc = NULL;
		//generate random data
		for (int i = 0; i != num; ++i) {
			ra[i] = rand();
			rb[i] = rand();
		}
		t.begin();
		//allocate in device memory
		errProc(cudaMalloc(&ga, size), "Failed to allocate A");
		errProc(cudaMalloc(&gb, size), "Failed to allocate B");
		errProc(cudaMalloc(&gc, size), "Failed to allocate C");
		//copy to device memory
		errProc(cudaMemcpy(ga, ra, size, cudaMemcpyHostToDevice), "Failed to copy A");
		errProc(cudaMemcpy(gb, rb, size, cudaMemcpyHostToDevice), "Failed to copy B");
		//launch
		//threads per block
		const int threads = 1024;
		//number of blocks
		const int block = (num + threads - 1) / threads;
		//launch with the required number of blocks and threads per block
		vectorAdd << <block, threads>> > (ga, gb, gc, num);
		errProc(cudaGetLastError(), "Failed to launch");
		//fetch the result
		errProc(cudaMemcpy(rc, gc, size, cudaMemcpyDeviceToHost), "Failed to read back the result");
		//free device memory
		cudaFree(ga), cudaFree(gb);
		cudaFree(gc);
		t.end();
		std::cout << "GPU computation finished, time: " << t.time() << std::endl;
		t.reset();
		t.begin();
		for (int i = 0; i < num; ++i) {
			rc[i] = ra[i] + rb[i];
		}
		t.end();
		std::cout << "CPU computation finished, time: " << t.time() << std::endl;
		delete[] ra;
		delete[] rb;
		delete[] rc;
	}
	catch (std::exception e) {
		std::cerr << e.what() << std::endl;
	}
	std::cout << "Press Enter to exit" << std::endl;
	getchar();
	return 0;
}
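/*
 * Worked numbers for the launch configuration above (values are just those
 * of this sample): with num = 500000 and threads = 1024,
 *   block = (500000 + 1024 - 1) / 1024 = 489,
 * which launches 489 * 1024 = 500736 threads in total; the guard
 * "if (i >= num) return;" makes the surplus 736 threads exit immediately.
 */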
68d02e88a846036d4684f77a956eb8c9748f4565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"library.h" #include "rocblas.h" __global__ void ScalarMultiply(float* in, float* out, int w, int h, int c, float factor){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < c) out[currentlocation]=factor*in[currentlocation]; } cv::Mat scalarMult(cv::Mat src,float factor){ int w=src.cols; int h=src.rows; int nc=src.channels(); float *srcArray = new float[(size_t)w*h*nc]; float *g_srcArray; float *g_destArray; convert_mat_to_layered ( srcArray,src); hipMalloc( &g_srcArray, w*h*nc * sizeof(float) ); hipMalloc( &g_destArray, w*h*nc * sizeof(float) ); hipMemcpy( g_srcArray, srcArray, w*h*nc * sizeof(float), hipMemcpyHostToDevice); hipMemset(g_destArray, 0, w*h*nc * sizeof(float)); //dim3 Block = dim3(32,32,1); //dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (nc+ Block.z -1) / Block.z); //ScalarMultiply<<<Grid,Block>>>(g_srcArray,g_destArray,w,h,nc,factor); hipblasSaxpy(nc*w*h,factor,g_srcArray,1,g_destArray,1); hipMemcpy(srcArray,g_destArray, nc*h*w * sizeof(float), hipMemcpyDeviceToHost ); convert_layered_to_mat(src, srcArray); hipFree(g_srcArray);//CUDA_CHECK; hipFree(g_destArray);//CUDA_CHECK; delete[] srcArray; //delete[] destArray; return src; }
68d02e88a846036d4684f77a956eb8c9748f4565.cu
#include"library.h" #include "cublas.h" __global__ void ScalarMultiply(float* in, float* out, int w, int h, int c, float factor){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < c) out[currentlocation]=factor*in[currentlocation]; } cv::Mat scalarMult(cv::Mat src,float factor){ int w=src.cols; int h=src.rows; int nc=src.channels(); float *srcArray = new float[(size_t)w*h*nc]; float *g_srcArray; float *g_destArray; convert_mat_to_layered ( srcArray,src); cudaMalloc( &g_srcArray, w*h*nc * sizeof(float) ); cudaMalloc( &g_destArray, w*h*nc * sizeof(float) ); cudaMemcpy( g_srcArray, srcArray, w*h*nc * sizeof(float), cudaMemcpyHostToDevice); cudaMemset(g_destArray, 0, w*h*nc * sizeof(float)); //dim3 Block = dim3(32,32,1); //dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (nc+ Block.z -1) / Block.z); //ScalarMultiply<<<Grid,Block>>>(g_srcArray,g_destArray,w,h,nc,factor); cublasSaxpy(nc*w*h,factor,g_srcArray,1,g_destArray,1); cudaMemcpy(srcArray,g_destArray, nc*h*w * sizeof(float), cudaMemcpyDeviceToHost ); convert_layered_to_mat(src, srcArray); cudaFree(g_srcArray);//CUDA_CHECK; cudaFree(g_destArray);//CUDA_CHECK; delete[] srcArray; //delete[] destArray; return src; }
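/*
 * The cuBLAS call above relies on SAXPY semantics, y := alpha*x + y; because
 * g_destArray is zeroed with cudaMemset beforehand, the net effect is
 * y = factor * x. A tiny CPU model of that single call (illustrative only):
 */
#include <cstdio>

int main() {
  const int n = 4;
  float factor = 2.5f;
  float x[n] = {1, 2, 3, 4};
  float y[n] = {0, 0, 0, 0};                    // zeroed, like the cudaMemset above
  for (int i = 0; i < n; ++i)
    y[i] = factor * x[i] + y[i];                // saxpy: y := alpha*x + y
  std::printf("y[3] = %.1f (expect 10.0)\n", y[3]);
  return 0;
}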
d908b507dd96ee50ed15310e28eccb971b3dd19a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __device__ int geti() { int i = blockIdx.z; i = i*gridDim.y + blockIdx.y; i = i*gridDim.x + blockIdx.x; i = i*blockDim.z + threadIdx.z; i = i*blockDim.y + threadIdx.y; i = i*blockDim.x + threadIdx.x; return i; } __global__ void process_kernel1(const float *A, const float *B, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = sin(A[i]) + cos(B[i]); } } __global__ void process_kernel2(const float *A, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = log(A[i]); } } __global__ void process_kernel3(const float *A, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = sqrt(A[i]); } }
d908b507dd96ee50ed15310e28eccb971b3dd19a.cu
#include <stdio.h> __device__ int geti() { int i = blockIdx.z; i = i*gridDim.y + blockIdx.y; i = i*gridDim.x + blockIdx.x; i = i*blockDim.z + threadIdx.z; i = i*blockDim.y + threadIdx.y; i = i*blockDim.x + threadIdx.x; return i; } __global__ void process_kernel1(const float *A, const float *B, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = sin(A[i]) + cos(B[i]); } } __global__ void process_kernel2(const float *A, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = log(A[i]); } } __global__ void process_kernel3(const float *A, float *C, const int numElements) { int i = geti(); if (i < numElements) { C[i] = sqrt(A[i]); } }
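/*
 * The geti() helper above linearizes the whole launch as
 *   i = ((((bz*gridDim.y + by)*gridDim.x + bx)*blockDim.z + tz)*blockDim.y + ty)*blockDim.x + tx,
 * so each block covers a contiguous range of blockDim.x*blockDim.y*blockDim.z
 * indices. A hypothetical host-side wrapper for process_kernel1 (a sketch;
 * the block shape is chosen only for illustration and assumes the kernel
 * definition above lives in the same translation unit):
 */
#include <cuda_runtime.h>

__global__ void process_kernel1(const float *A, const float *B, float *C, const int numElements);

int run_process_kernel1(const float *dA, const float *dB, float *dC, int numElements) {
  dim3 block(8, 8, 4);                                   // 256 threads per block
  int threadsPerBlock = block.x * block.y * block.z;
  int blocks = (numElements + threadsPerBlock - 1) / threadsPerBlock;
  dim3 grid(blocks, 1, 1);                               // 1-D grid keeps geti() contiguous
  process_kernel1<<<grid, block>>>(dA, dB, dC, numElements);
  return cudaDeviceSynchronize() == cudaSuccess ? 0 : -1;
}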
949f279e4a82861ac7142b76c432569767e67bce.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <math.h> #include <algorithm> #include <map> #include <random> #include <time.h> #include <hip/hip_runtime.h> using namespace std; __host__ __device__ unsigned hash_func(unsigned key, int hash_num, unsigned tablesize){ int c2=0x27d4eb2d; switch (hash_num){ case 0: key = (key+0x7ed55d16) + (key<<12); key = (key^0xc761c23c) ^ (key>>19); key = (key+0x165667b1) + (key<<5); key = (key+0xd3a2646c) ^ (key<<9); key = (key+0xfd7046c5) + (key<<3); key = (key^0xb55a4f09) ^ (key>>16); return key%tablesize; case 1: key = (key^61)^(key>>16); key = key+(key<<3); key = key^(key>>4); key = key*c2; key = key^(key>>15); return key%tablesize; case 2: return ((66*key+32)%537539573)%tablesize; default: //printf("wrong hash_num\n"); return 0; } } __device__ void secondtableInsertion(unsigned key, unsigned* secondtable){ unsigned secondtablesize = pow(2,18); unsigned location = ((33*key+87)%116743349)&(secondtablesize-1); for(unsigned i = 0; i < 200; ++i) { key = atomicExch(&secondtable[location],key); if(key!=NULL){ location++; if(location == secondtablesize-1){ location = 0; } continue; } return; } //printf("Failed.\n"); return; } __global__ void lookupHash(unsigned* keys, unsigned* table,unsigned keysize,unsigned tablesize, unsigned* secondtable,int hash_num){ int index = blockDim.x * blockIdx.x + threadIdx.x; if(index>keysize){ return; } unsigned key = keys[index]; unsigned location[3]; for(unsigned j = 0; j < hash_num; ++j) { location[j] = hash_func(key,j,tablesize); } for(unsigned i = 0; i < hash_num; ++i) { if(atomicCAS(&table[location[i]], key, key) == key){ return; } } unsigned secondtablesize = pow(2,18); unsigned location1 = ((33*key+87)%116743349)&(secondtablesize-1); unsigned key1; for(unsigned i = 0; i < 200; ++i) { key1 = atomicCAS(&table[location1],key,key); if(key1 == key || key1 == NULL){ return; } location1++; if(location1 == secondtablesize-1){ location1 = 0; } } return; } __global__ void cuckooHash(unsigned* cuda_tables, unsigned* cuda_keys, unsigned keysize, int M,int hash_num, unsigned tablesize, unsigned* secondtable){ int index = blockDim.x*blockIdx.x + threadIdx.x; if(index >= keysize) { return; } unsigned key = cuda_keys[index]; unsigned location[3]; location[0] = hash_func(key,0,tablesize); for(unsigned i = 0; i <= M; ++i) { if(i==M) { secondtableInsertion(key,secondtable); } key = atomicExch(&cuda_tables[location[i%hash_num]],key); if(key==NULL) { return; } for(unsigned j = 0; j < hash_num; ++j) { location[j] = hash_func(key,j,tablesize); } } } int main() { for(unsigned t = 0; t < 5; ++t) { for(unsigned mn = 1; mn <=10 ; ++mn) { int hash_num = 3; unsigned keysize = pow(2,24); unsigned *keys = (unsigned *)malloc(keysize*sizeof(unsigned)); unsigned tablesize = 1.4*keysize; unsigned secondtablesize = pow(2,18); unsigned *tables = (unsigned *)malloc(tablesize*sizeof(unsigned)); unsigned *secondtable = (unsigned *)malloc(secondtablesize*sizeof(unsigned)); for(unsigned i = 0; i < tablesize; ++i) { tables[i] = 0; } for(unsigned i = 0; i < secondtablesize; ++i) { secondtable[i] = 0; } std::map<unsigned ,bool> randommap; std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<unsigned> dis(1, pow(2,32)-1); for(unsigned i = 0; i < keysize; ++i) { unsigned rand = dis(gen); while(randommap.find(rand) != randommap.end()) { rand = dis(gen); } randommap[rand] = true; keys[i] = rand; } unsigned* cuda_keys; unsigned* cuda_tables; unsigned* cuda_secondtable; int blockSize; int minGridSize; int 
gridSize; hipDeviceReset(); hipMalloc(&cuda_tables, tablesize*sizeof(unsigned)); hipMalloc(&cuda_keys, keysize*sizeof(unsigned)); hipMalloc(&cuda_secondtable, secondtablesize*sizeof(unsigned)); hipMemcpy(cuda_tables, tables, tablesize*sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy(cuda_keys, keys, keysize*sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy(cuda_secondtable, secondtable, secondtablesize*sizeof(unsigned), hipMemcpyHostToDevice); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuckooHash, 0, 1000000); gridSize = (keysize + blockSize - 1) / blockSize; int M = (int)mn*ceil(log2((double)keysize)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuckooHash), dim3(gridSize),dim3(blockSize), 0, 0, cuda_tables, cuda_keys, keysize, M,hash_num,tablesize,cuda_secondtable); hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("bound = %d * Log(n),time: %.2f ms\n",mn,kernelTime); } } return 0; }
949f279e4a82861ac7142b76c432569767e67bce.cu
#include <iostream> #include <math.h> #include <algorithm> #include <map> #include <random> #include <time.h> #include <cuda_runtime.h> using namespace std; __host__ __device__ unsigned hash_func(unsigned key, int hash_num, unsigned tablesize){ int c2=0x27d4eb2d; switch (hash_num){ case 0: key = (key+0x7ed55d16) + (key<<12); key = (key^0xc761c23c) ^ (key>>19); key = (key+0x165667b1) + (key<<5); key = (key+0xd3a2646c) ^ (key<<9); key = (key+0xfd7046c5) + (key<<3); key = (key^0xb55a4f09) ^ (key>>16); return key%tablesize; case 1: key = (key^61)^(key>>16); key = key+(key<<3); key = key^(key>>4); key = key*c2; key = key^(key>>15); return key%tablesize; case 2: return ((66*key+32)%537539573)%tablesize; default: //printf("wrong hash_num\n"); return 0; } } __device__ void secondtableInsertion(unsigned key, unsigned* secondtable){ unsigned secondtablesize = pow(2,18); unsigned location = ((33*key+87)%116743349)&(secondtablesize-1); for(unsigned i = 0; i < 200; ++i) { key = atomicExch(&secondtable[location],key); if(key!=NULL){ location++; if(location == secondtablesize-1){ location = 0; } continue; } return; } //printf("Failed.\n"); return; } __global__ void lookupHash(unsigned* keys, unsigned* table,unsigned keysize,unsigned tablesize, unsigned* secondtable,int hash_num){ int index = blockDim.x * blockIdx.x + threadIdx.x; if(index>keysize){ return; } unsigned key = keys[index]; unsigned location[3]; for(unsigned j = 0; j < hash_num; ++j) { location[j] = hash_func(key,j,tablesize); } for(unsigned i = 0; i < hash_num; ++i) { if(atomicCAS(&table[location[i]], key, key) == key){ return; } } unsigned secondtablesize = pow(2,18); unsigned location1 = ((33*key+87)%116743349)&(secondtablesize-1); unsigned key1; for(unsigned i = 0; i < 200; ++i) { key1 = atomicCAS(&table[location1],key,key); if(key1 == key || key1 == NULL){ return; } location1++; if(location1 == secondtablesize-1){ location1 = 0; } } return; } __global__ void cuckooHash(unsigned* cuda_tables, unsigned* cuda_keys, unsigned keysize, int M,int hash_num, unsigned tablesize, unsigned* secondtable){ int index = blockDim.x*blockIdx.x + threadIdx.x; if(index >= keysize) { return; } unsigned key = cuda_keys[index]; unsigned location[3]; location[0] = hash_func(key,0,tablesize); for(unsigned i = 0; i <= M; ++i) { if(i==M) { secondtableInsertion(key,secondtable); } key = atomicExch(&cuda_tables[location[i%hash_num]],key); if(key==NULL) { return; } for(unsigned j = 0; j < hash_num; ++j) { location[j] = hash_func(key,j,tablesize); } } } int main() { for(unsigned t = 0; t < 5; ++t) { for(unsigned mn = 1; mn <=10 ; ++mn) { int hash_num = 3; unsigned keysize = pow(2,24); unsigned *keys = (unsigned *)malloc(keysize*sizeof(unsigned)); unsigned tablesize = 1.4*keysize; unsigned secondtablesize = pow(2,18); unsigned *tables = (unsigned *)malloc(tablesize*sizeof(unsigned)); unsigned *secondtable = (unsigned *)malloc(secondtablesize*sizeof(unsigned)); for(unsigned i = 0; i < tablesize; ++i) { tables[i] = 0; } for(unsigned i = 0; i < secondtablesize; ++i) { secondtable[i] = 0; } std::map<unsigned ,bool> randommap; std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<unsigned> dis(1, pow(2,32)-1); for(unsigned i = 0; i < keysize; ++i) { unsigned rand = dis(gen); while(randommap.find(rand) != randommap.end()) { rand = dis(gen); } randommap[rand] = true; keys[i] = rand; } unsigned* cuda_keys; unsigned* cuda_tables; unsigned* cuda_secondtable; int blockSize; int minGridSize; int gridSize; cudaDeviceReset(); cudaMalloc(&cuda_tables, 
tablesize*sizeof(unsigned)); cudaMalloc(&cuda_keys, keysize*sizeof(unsigned)); cudaMalloc(&cuda_secondtable, secondtablesize*sizeof(unsigned)); cudaMemcpy(cuda_tables, tables, tablesize*sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(cuda_keys, keys, keysize*sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(cuda_secondtable, secondtable, secondtablesize*sizeof(unsigned), cudaMemcpyHostToDevice); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuckooHash, 0, 1000000); gridSize = (keysize + blockSize - 1) / blockSize; int M = (int)mn*ceil(log2((double)keysize)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cuckooHash<<<gridSize,blockSize>>>(cuda_tables, cuda_keys, keysize, M,hash_num,tablesize,cuda_secondtable); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("bound = %d * Log(n),time: %.2f ms\n",mn,kernelTime); } } return 0; }
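/*
 * A small CPU sketch of the insertion loop cuckooHash implements above:
 * evict-and-reinsert for at most M steps, then fall back to the stash (the
 * "second table"). The hash functions, table size and keys here are toy
 * values for illustration only; 0 marks an empty slot, as in the kernel.
 */
#include <cstdio>
#include <utility>
#include <vector>

static unsigned toy_hash(unsigned key, int which, unsigned tablesize) {
  return ((which + 2u) * 31u * key + 17u * (which + 1u)) % tablesize;
}

int main() {
  const unsigned tablesize = 11;
  const int hash_num = 3, M = 12;                  // M plays the role of c*log2(n)
  std::vector<unsigned> table(tablesize, 0), stash;
  unsigned keys[] = {5, 16, 27, 38, 49, 60};

  for (unsigned key : keys) {
    bool placed = false;
    for (int step = 0; step < M; ++step) {
      unsigned slot = toy_hash(key, step % hash_num, tablesize);
      std::swap(key, table[slot]);                 // mirrors atomicExch in the kernel
      if (key == 0) { placed = true; break; }      // displaced an empty slot: done
    }
    if (!placed) stash.push_back(key);             // mirrors secondtableInsertion
  }
  std::printf("stashed %zu of %zu keys\n", stash.size(), sizeof(keys) / sizeof(keys[0]));
  return 0;
}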
9abf08285832c74e56216f001c7abe3cfc31a1ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from ztrsm.cu normal z -> s, Fri Jul 18 17:34:13 2014 @author Peng Du @author Tingxing Dong */ #include "common_magma.h" #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void strsm_copy_kernel(int m, int n, float *dB, int lddb, float *dX, int lddx) { int by = blockIdx.y; int ind = blockIdx.x*blockDim.x + threadIdx.x; if (ind < m) dB[by*lddb + ind] = dX[by*lddx + ind]; } #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define strsm_copy() \ do { \ dim3 threads( (m >= MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((m/WARP_SIZE)+(m % WARP_SIZE != 0))), 1 ); \ dim3 grid( (m - 1)/threads.x + 1, n ); \ hipLaunchKernelGGL(( strsm_copy_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dB, lddb, dX, m); \ } while(0) // previously strsm_copy had sync -- there's no need; strsm should be async. // magma_device_sync(); \ /** Purpose ------- strsm_work solves one of the matrix equations on gpu op(A)*X = alpha*B, or X*op(A) = alpha*B, where alpha is a scalar, X and B are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The matrix X is overwritten on B. This is an asynchronous version of magmablas_strsm with flag, d_dinvA and dX workspaces as arguments. Arguments ---------- @param[in] side magma_side_t. On entry, side specifies whether op(A) appears on the left or right of X as follows: - = MagmaLeft: op(A)*X = alpha*B. - = MagmaRight: X*op(A) = alpha*B. @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] transA magma_trans_t. On entry, transA specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] m INTEGER. On entry, m specifies the number of rows of B. m must be at least zero. @param[in] n INTEGER. On entry, n specifies the number of columns of B. n must be at least zero. @param[in] alpha REAL. On entry, alpha specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. @param[in] dA REAL array of DIMENSION ( ldda, k ), where k is m when side = MagmaLeft and is n when side = MagmaRight. Before entry with uplo = MagmaUpper, the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, ldda specifies the first dimension of A as declared in the calling (sub) program. 
When side = MagmaLeft then ldda must be at least max( 1, m ), when side = MagmaRight then ldda must be at least max( 1, n ). @param[in,out] dB REAL array of DIMENSION ( lddb, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. @param[in] lddb INTEGER. On entry, lddb specifies the first dimension of B as declared in the calling (sub) program. lddb must be at least max( 1, m ). @param[in] flag BOOLEAN. If flag is true, invert diagonal blocks. If flag is false, assume diagonal blocks are already inverted. @param d_dinvA (workspace) on device. If side == MagmaLeft, d_dinvA must be of size >= ((m+NB-1)/NB)*NB*NB, If side == MagmaRight, d_dinvA must be of size >= ((n+NB-1)/NB)*NB*NB, where NB = 128. @param dX (workspace) size m*n, on device. @param[in] stream magma_queue_t Stream to execute in. @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_strsm_work( magma_side_t side, magma_uplo_t uplo, magma_trans_t transA, magma_diag_t diag, magma_int_t m, magma_int_t n, float alpha, const float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t flag, float* d_dinvA, float *dX) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) #define dX(i_, j_) (dX + (i_) + (j_)*m) #define d_dinvA(i_) (d_dinvA + (i_)*NB) const float c_neg_one = MAGMA_S_NEG_ONE; const float c_one = MAGMA_S_ONE; const float c_zero = MAGMA_S_ZERO; magma_int_t i, jb; magma_int_t nrowA = (side == MagmaLeft ? m : n); magma_int_t info = 0; if ( side != MagmaLeft && side != MagmaRight ) { info = -1; } else if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -2; } else if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != Magma_ConjTrans ) { info = -3; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -4; } else if (m < 0) { info = -5; } else if (n < 0) { info = -6; } else if (ldda < max(1,nrowA)) { info = -9; } else if (lddb < max(1,m)) { info = -11; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. if (m == 0 || n == 0) return; if (side == MagmaLeft) { // invert diagonal blocks if (flag) magmablas_strtri_diag( uplo, diag, m, dA, ldda, d_dinvA ); if (transA == MagmaNoTrans) { if (uplo == MagmaLower) { // left, lower no-transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(NB,0), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i+NB,i), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } else { // left, upper no-transpose // handle first block seperately with alpha jb = (m % NB == 0) ? 
NB : (m % NB); i = m-jb; magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(0,i), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(0,i), ldda, dX(i,0), m, c_one, dB, lddb ); } } } else if( transA == MagmaTrans) { if (uplo == MagmaLower) { // left, lower transpose // handle first block seperately with alpha jb = (m % NB == 0) ? NB : (m % NB); i = m-jb; magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(i,0), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(i,0), ldda, dX(i,0), m, c_one, dB, lddb ); } } else { // left, upper transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(0,NB), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i,i+NB), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } } else { // transA == MagmaConjTras if (uplo == MagmaLower) { // left, lower conjugate-transpose // handle first block seperately with alpha jb = (m % NB == 0) ? 
NB : (m % NB); i = m-jb; magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(i,0), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(i,0), ldda, dX(i,0), m, c_one, dB, lddb ); } } else { // left, upper conjugate-transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(0,NB), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i,i+NB), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } } } else { // side == MagmaRight // invert diagonal blocks if (flag) magmablas_strtri_diag( uplo, diag, n, dA, ldda, d_dinvA ); if (transA == MagmaNoTrans) { if (uplo == MagmaLower) { // right, lower no-transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(i,0), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(i,0), ldda, c_one, dB, lddb ); } } else { // right, upper no-transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, n-NB, NB, c_neg_one, dX, m, dA(0,NB), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(i,i+NB), ldda, c_one, dB(0,i+NB), lddb ); } } } else if (transA == MagmaTrans) { if (uplo == MagmaLower) { // right, lower transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-NB, NB, c_neg_one, dX, m, dA(NB,0), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(NB+i,i), ldda, c_one, dB(0,i+NB), lddb ); } } else { // 
right, upper transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(0,i), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(0,i), ldda, c_one, dB, lddb ); } } } else { // TransA == MagmaTrans if (uplo == MagmaLower) { // right, lower conjugate-transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-NB, NB, c_neg_one, dX, m, dA(NB,0), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm(MagmaNoTrans, MagmaTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(NB+i,i), ldda, c_one, dB(0,i+NB), lddb); } } else { // right, upper conjugate-transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(0,i), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(0,i), ldda, c_one, dB, lddb ); } } } } strsm_copy(); } /** @see magmablas_strsm_work @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_strsm( magma_side_t side, magma_uplo_t uplo, magma_trans_t transA, magma_diag_t diag, magma_int_t m, magma_int_t n, float alpha, const float* dA, magma_int_t ldda, float* dB, magma_int_t lddb ) { magma_int_t nrowA = (side == MagmaLeft ? 
m : n); magma_int_t info = 0; if ( side != MagmaLeft && side != MagmaRight ) { info = -1; } else if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -2; } else if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != Magma_ConjTrans ) { info = -3; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -4; } else if (m < 0) { info = -5; } else if (n < 0) { info = -6; } else if (ldda < max(1,nrowA)) { info = -9; } else if (lddb < max(1,m)) { info = -11; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } float *d_dinvA, *dX; magma_int_t size_dinvA; magma_int_t size_x = m*n; if ( side == MagmaLeft ) { size_dinvA = ((m+NB-1)/NB)*NB*NB; } else { size_dinvA = ((n+NB-1)/NB)*NB*NB; } magma_smalloc( &d_dinvA, size_dinvA ); magma_smalloc( &dX, size_x ); if ( d_dinvA == NULL || dX == NULL ) { info = MAGMA_ERR_DEVICE_ALLOC; magma_xerbla( __func__, -(info) ); goto cleanup; } magmablas_strsm_work( side, uplo, transA, diag, m, n, alpha, dA, ldda, dB, lddb, 1, d_dinvA, dX ); cleanup: magma_free( d_dinvA ); magma_free( dX ); }
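The blocked solve above is specialized into eight side/uplo/transA branches, which makes the underlying recursion hard to follow. Below is a minimal host-only sketch of just the left, lower, no-transpose case, assuming column-major storage and a non-unit diagonal; it stands in for the device code by replacing the precomputed block inverses (strtri_diag) and the magma_sgemm calls with naive loops, and it folds alpha in by pre-scaling B rather than carrying it into the first update. The helper names (blocked_trsm_llnn, solve_diag_lower, gemm_acc) and the tiny block size are invented for illustration; this is a reference for the recursion, not the MAGMA implementation.

#include <cstdio>
#include <vector>

static const int NBLK = 2;  // tiny block size for illustration; MAGMA uses NB = 128

// C(mC x nC) += alpha * A(mC x kK) * B(kK x nC), all column-major
static void gemm_acc(int mC, int nC, int kK, float alpha,
                     const float* A, int lda, const float* B, int ldb,
                     float* C, int ldc) {
    for (int j = 0; j < nC; ++j)
        for (int i = 0; i < mC; ++i) {
            float s = 0.f;
            for (int k = 0; k < kK; ++k) s += A[i + k * lda] * B[k + j * ldb];
            C[i + j * ldc] += alpha * s;
        }
}

// Forward substitution on one jb x jb lower-triangular diagonal block
// (the real code multiplies by a precomputed inverse from strtri_diag instead).
static void solve_diag_lower(int jb, int n, const float* L, int ldl, float* X, int ldx) {
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < jb; ++i) {
            float s = X[i + j * ldx];
            for (int k = 0; k < i; ++k) s -= L[i + k * ldl] * X[k + j * ldx];
            X[i + j * ldx] = s / L[i + i * ldl];
        }
}

// Solve L * X = alpha * B for the left/lower/no-transpose case; X overwrites B.
static void blocked_trsm_llnn(int m, int n, float alpha,
                              const float* L, int ldl, float* B, int ldb) {
    for (int j = 0; j < n; ++j)                 // pre-scale by alpha (the device code
        for (int i = 0; i < m; ++i)             // folds alpha into the first update instead)
            B[i + j * ldb] *= alpha;
    for (int i = 0; i < m; i += NBLK) {
        int jb = (m - i < NBLK) ? (m - i) : NBLK;
        solve_diag_lower(jb, n, &L[i + i * ldl], ldl, &B[i], ldb);   // X(i) = inv(L(i,i)) * B(i)
        if (i + jb < m)                                              // trailing update of B
            gemm_acc(m - i - jb, n, jb, -1.f,
                     &L[(i + jb) + i * ldl], ldl, &B[i], ldb, &B[i + jb], ldb);
    }
}

int main() {
    const int m = 4, n = 2;
    // Column-major lower-triangular L (entries above the diagonal are unused) and RHS B.
    std::vector<float> L = {2, 1, 0, 3,   0, 1, 2, 0,   0, 0, 4, 1,   0, 0, 0, 2};
    std::vector<float> B = {2, 3, 8, 10,  4, 5, 14, 13};
    blocked_trsm_llnn(m, n, 1.0f, L.data(), m, B.data(), m);
    for (int i = 0; i < m; ++i)
        printf("x(%d) = %6.3f  %6.3f\n", i, B[i], B[i + m]);
    return 0;
}

Compiled with any C++ compiler, the example prints the solution columns [1 2 1 3] and [2 3 2 2.5], which match direct forward substitution; the real routine gains its speed by turning the per-block solves into batched GEMMs against precomputed diagonal-block inverses.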
9abf08285832c74e56216f001c7abe3cfc31a1ac.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from ztrsm.cu normal z -> s, Fri Jul 18 17:34:13 2014 @author Peng Du @author Tingxing Dong */ #include "common_magma.h" #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void strsm_copy_kernel(int m, int n, float *dB, int lddb, float *dX, int lddx) { int by = blockIdx.y; int ind = blockIdx.x*blockDim.x + threadIdx.x; if (ind < m) dB[by*lddb + ind] = dX[by*lddx + ind]; } #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define strsm_copy() \ do { \ dim3 threads( (m >= MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((m/WARP_SIZE)+(m % WARP_SIZE != 0))), 1 ); \ dim3 grid( (m - 1)/threads.x + 1, n ); \ strsm_copy_kernel<<< grid, threads, 0, magma_stream >>>(m, n, dB, lddb, dX, m); \ } while(0) // previously strsm_copy had sync -- there's no need; strsm should be async. // magma_device_sync(); \ /** Purpose ------- strsm_work solves one of the matrix equations on gpu op(A)*X = alpha*B, or X*op(A) = alpha*B, where alpha is a scalar, X and B are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The matrix X is overwritten on B. This is an asynchronous version of magmablas_strsm with flag, d_dinvA and dX workspaces as arguments. Arguments ---------- @param[in] side magma_side_t. On entry, side specifies whether op(A) appears on the left or right of X as follows: - = MagmaLeft: op(A)*X = alpha*B. - = MagmaRight: X*op(A) = alpha*B. @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] transA magma_trans_t. On entry, transA specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] m INTEGER. On entry, m specifies the number of rows of B. m must be at least zero. @param[in] n INTEGER. On entry, n specifies the number of columns of B. n must be at least zero. @param[in] alpha REAL. On entry, alpha specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. @param[in] dA REAL array of DIMENSION ( ldda, k ), where k is m when side = MagmaLeft and is n when side = MagmaRight. Before entry with uplo = MagmaUpper, the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, ldda specifies the first dimension of A as declared in the calling (sub) program. When side = MagmaLeft then ldda must be at least max( 1, m ), when side = MagmaRight then ldda must be at least max( 1, n ). 
@param[in,out] dB REAL array of DIMENSION ( lddb, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. @param[in] lddb INTEGER. On entry, lddb specifies the first dimension of B as declared in the calling (sub) program. lddb must be at least max( 1, m ). @param[in] flag BOOLEAN. If flag is true, invert diagonal blocks. If flag is false, assume diagonal blocks are already inverted. @param d_dinvA (workspace) on device. If side == MagmaLeft, d_dinvA must be of size >= ((m+NB-1)/NB)*NB*NB, If side == MagmaRight, d_dinvA must be of size >= ((n+NB-1)/NB)*NB*NB, where NB = 128. @param dX (workspace) size m*n, on device. @param[in] stream magma_queue_t Stream to execute in. @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_strsm_work( magma_side_t side, magma_uplo_t uplo, magma_trans_t transA, magma_diag_t diag, magma_int_t m, magma_int_t n, float alpha, const float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t flag, float* d_dinvA, float *dX) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) #define dX(i_, j_) (dX + (i_) + (j_)*m) #define d_dinvA(i_) (d_dinvA + (i_)*NB) const float c_neg_one = MAGMA_S_NEG_ONE; const float c_one = MAGMA_S_ONE; const float c_zero = MAGMA_S_ZERO; magma_int_t i, jb; magma_int_t nrowA = (side == MagmaLeft ? m : n); magma_int_t info = 0; if ( side != MagmaLeft && side != MagmaRight ) { info = -1; } else if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -2; } else if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != Magma_ConjTrans ) { info = -3; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -4; } else if (m < 0) { info = -5; } else if (n < 0) { info = -6; } else if (ldda < max(1,nrowA)) { info = -9; } else if (lddb < max(1,m)) { info = -11; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. if (m == 0 || n == 0) return; if (side == MagmaLeft) { // invert diagonal blocks if (flag) magmablas_strtri_diag( uplo, diag, m, dA, ldda, d_dinvA ); if (transA == MagmaNoTrans) { if (uplo == MagmaLower) { // left, lower no-transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(NB,0), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i+NB,i), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } else { // left, upper no-transpose // handle first block seperately with alpha jb = (m % NB == 0) ? 
NB : (m % NB); i = m-jb; magma_sgemm( MagmaNoTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(0,i), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(0,i), ldda, dX(i,0), m, c_one, dB, lddb ); } } } else if( transA == MagmaTrans) { if (uplo == MagmaLower) { // left, lower transpose // handle first block seperately with alpha jb = (m % NB == 0) ? NB : (m % NB); i = m-jb; magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(i,0), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(i,0), ldda, dX(i,0), m, c_one, dB, lddb ); } } else { // left, upper transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(0,NB), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i,i+NB), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } } else { // transA == MagmaConjTras if (uplo == MagmaLower) { // left, lower conjugate-transpose // handle first block seperately with alpha jb = (m % NB == 0) ? 
NB : (m % NB); i = m-jb; magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, jb, c_neg_one, dA(i,0), ldda, dX(i,0), m, alpha, dB, lddb ); // remaining blocks for( i=m-jb-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaTrans, MagmaNoTrans, NB, n, NB, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i-NB < 0) break; magma_sgemm( MagmaTrans, MagmaNoTrans, i, n, NB, c_neg_one, dA(i,0), ldda, dX(i,0), m, c_one, dB, lddb ); } } else { // left, upper conjugate-transpose // handle first block seperately with alpha jb = min(NB, m); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, alpha, d_dinvA(0), NB, dB, lddb, c_zero, dX, m ); if (NB >= m) { strsm_copy(); return; } magma_sgemm( MagmaTrans, MagmaNoTrans, m-NB, n, NB, c_neg_one, dA(0,NB), ldda, dX, m, alpha, dB(NB,0), lddb ); // remaining blocks for( i=NB; i < m; i += NB ) { jb = min(m-i, NB); magma_sgemm( MagmaTrans, MagmaNoTrans, jb, n, jb, c_one, d_dinvA(i), NB, dB(i,0), lddb, c_zero, dX(i,0), m ); if (i+NB >= m) break; magma_sgemm( MagmaTrans, MagmaNoTrans, m-i-NB, n, NB, c_neg_one, dA(i,i+NB), ldda, dX(i,0), m, c_one, dB(i+NB,0), lddb ); } } } } else { // side == MagmaRight // invert diagonal blocks if (flag) magmablas_strtri_diag( uplo, diag, n, dA, ldda, d_dinvA ); if (transA == MagmaNoTrans) { if (uplo == MagmaLower) { // right, lower no-transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(i,0), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(i,0), ldda, c_one, dB, lddb ); } } else { // right, upper no-transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, n-NB, NB, c_neg_one, dX, m, dA(0,NB), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm( MagmaNoTrans, MagmaNoTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(i,i+NB), ldda, c_one, dB(0,i+NB), lddb ); } } } else if (transA == MagmaTrans) { if (uplo == MagmaLower) { // right, lower transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-NB, NB, c_neg_one, dX, m, dA(NB,0), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(NB+i,i), ldda, c_one, dB(0,i+NB), lddb ); } } else { // 
right, upper transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(0,i), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(0,i), ldda, c_one, dB, lddb ); } } } else { // TransA == MagmaTrans if (uplo == MagmaLower) { // right, lower conjugate-transpose // handle first block seperately with alpha int nn = min(NB, n); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB, lddb, d_dinvA(0), NB, c_zero, dX, m ); if (NB >= n) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, n-NB, NB, c_neg_one, dX, m, dA(NB,0), ldda, alpha, dB(0,NB), lddb ); // remaining blocks for( i=NB; i < n; i += NB ) { nn = min(NB, n-i); magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i+NB >= n) break; magma_sgemm(MagmaNoTrans, MagmaTrans, m, n-i-NB, NB, c_neg_one, dX(0,i), m, dA(NB+i,i), ldda, c_one, dB(0,i+NB), lddb); } } else { // right, upper conjugate-transpose // handle first block seperately with alpha int nn = (n % NB == 0) ? NB : (n % NB); i = n-nn; magma_sgemm( MagmaNoTrans, MagmaTrans, m, nn, nn, alpha, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) { strsm_copy(); return; } magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, nn, c_neg_one, dX(0,i), m, dA(0,i), ldda, alpha, dB, lddb ); // remaining blocks for( i=n-nn-NB; i >= 0; i -= NB ) { magma_sgemm( MagmaNoTrans, MagmaTrans, m, NB, NB, c_one, dB(0,i), lddb, d_dinvA(i), NB, c_zero, dX(0,i), m ); if (i-NB < 0) break; magma_sgemm( MagmaNoTrans, MagmaTrans, m, i, NB, c_neg_one, dX(0,i), m, dA(0,i), ldda, c_one, dB, lddb ); } } } } strsm_copy(); } /** @see magmablas_strsm_work @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_strsm( magma_side_t side, magma_uplo_t uplo, magma_trans_t transA, magma_diag_t diag, magma_int_t m, magma_int_t n, float alpha, const float* dA, magma_int_t ldda, float* dB, magma_int_t lddb ) { magma_int_t nrowA = (side == MagmaLeft ? 
m : n); magma_int_t info = 0; if ( side != MagmaLeft && side != MagmaRight ) { info = -1; } else if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -2; } else if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != Magma_ConjTrans ) { info = -3; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -4; } else if (m < 0) { info = -5; } else if (n < 0) { info = -6; } else if (ldda < max(1,nrowA)) { info = -9; } else if (lddb < max(1,m)) { info = -11; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } float *d_dinvA, *dX; magma_int_t size_dinvA; magma_int_t size_x = m*n; if ( side == MagmaLeft ) { size_dinvA = ((m+NB-1)/NB)*NB*NB; } else { size_dinvA = ((n+NB-1)/NB)*NB*NB; } magma_smalloc( &d_dinvA, size_dinvA ); magma_smalloc( &dX, size_x ); if ( d_dinvA == NULL || dX == NULL ) { info = MAGMA_ERR_DEVICE_ALLOC; magma_xerbla( __func__, -(info) ); goto cleanup; } magmablas_strsm_work( side, uplo, transA, diag, m, n, alpha, dA, ldda, dB, lddb, 1, d_dinvA, dX ); cleanup: magma_free( d_dinvA ); magma_free( dX ); }
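Both versions of the routine accumulate the solution in the dX workspace and only write it back to dB through the strsm_copy() macro. The stand-alone program below is a small sketch of that final copy with the same launch-sizing rule (rows rounded up to a warp, capped at 512 threads, one grid column per matrix column). The kernel body mirrors strsm_copy_kernel; the host harness, the copy_back_kernel name, the default-stream launch, and the matrix sizes are assumptions made for the demo rather than MAGMA code.

#include <cstdio>
#include <cuda_runtime.h>

#define MAX_THREAD_PER_BLOCK 512
#define WARP_SIZE 32

// Copy an m x n column-major matrix from dX (leading dimension lddx)
// into dB (leading dimension lddb); one grid column per matrix column.
__global__ void copy_back_kernel(int m, int n, float* dB, int lddb,
                                 const float* dX, int lddx) {
    int by  = blockIdx.y;                              // which column
    int ind = blockIdx.x * blockDim.x + threadIdx.x;   // which row
    if (ind < m)
        dB[by * lddb + ind] = dX[by * lddx + ind];
}

int main() {
    const int m = 1000, n = 3, lddb = 1024;            // lddb > m to show the padded layout
    float *dB, *dX;
    cudaMalloc(&dB, sizeof(float) * lddb * n);
    cudaMalloc(&dX, sizeof(float) * m * n);

    // Fill dX with a recognizable pattern and dB with zeros.
    float* hX = new float[m * n];
    for (int i = 0; i < m * n; ++i) hX[i] = (float)i;
    cudaMemcpy(dX, hX, sizeof(float) * m * n, cudaMemcpyHostToDevice);
    cudaMemset(dB, 0, sizeof(float) * lddb * n);

    // Same sizing rule as the strsm_copy() macro: round the row count up to a warp,
    // cap at MAX_THREAD_PER_BLOCK, and use one grid column per matrix column.
    int tx = (m >= MAX_THREAD_PER_BLOCK)
                 ? MAX_THREAD_PER_BLOCK
                 : WARP_SIZE * ((m / WARP_SIZE) + (m % WARP_SIZE != 0));
    dim3 threads(tx, 1);
    dim3 grid((m - 1) / threads.x + 1, n);
    copy_back_kernel<<<grid, threads>>>(m, n, dB, lddb, dX, m);
    cudaDeviceSynchronize();

    // Spot-check one element of the last column.
    float v = 0.f;
    cudaMemcpy(&v, dB + (n - 1) * lddb + (m - 1), sizeof(float), cudaMemcpyDeviceToHost);
    printf("dB(m-1, n-1) = %.1f (expected %.1f)\n", v, (float)(m * n - 1));

    delete[] hX;
    cudaFree(dB);
    cudaFree(dX);
    return 0;
}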
55d68894140edf5d2a7f3fd7cb4f49769401dce1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cub/util_type.cuh" #include <hipcub/hipcub.hpp> #include <cub/device/device_segmented_radix_sort.cuh> #include "contrib_ops/cuda/transformers/generation_cuda_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { __global__ void InitKernel(float* beam_scores, int num_beams, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int beam_index = index % num_beams; beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f; } } void LaunchInitKernel( float* beam_scores, int batch_size, int num_beams, hipStream_t stream) { int total_elements = batch_size * num_beams; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( InitKernel), dim3(gridSize), dim3(blockSize), 0, stream, beam_scores, num_beams, total_elements); } __global__ void NextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int vocab_size, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { next_indices[index] = next_token_indices[index] / vocab_size; next_tokens[index] = next_token_indices[index] % vocab_size; } } void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, hipStream_t stream) { int total_elements = batch_size * top_k; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( NextTokenKernel), dim3(gridSize), dim3(blockSize), 0, stream, next_token_indices, next_indices, next_tokens, vocab_size, total_elements); } template <typename T> __global__ void LogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, const int* presence_mask, float presence_penalty, float temperature, int num_beams, int vocab_size, int padded_vocab_size, int total_elements, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int batch_beam_index = index / padded_vocab_size; int word_id = index % padded_vocab_size; if (word_id >= vocab_size) { // Set any value within the padding region to the lowest value so that it isn't picked next_token_scores[index] = cub::FpLimits<T>::Lowest(); } else { // RepetitionPenaltyLogitsProcessor if (repetition_penalty != 1.0f) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = 0; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { found = true; break; } } if (found) { float score = (float)next_token_scores[index]; next_token_scores[index] = (T)(score < 0 ? 
score * repetition_penalty : score / repetition_penalty); } } // NoRepeatNGramLogitsProcessor if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { // last token of n-gram matched found = true; for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) { found = false; break; } } if (found) { break; } } } if (found) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } } // VocabMaskLogitsProcessor if (vocab_mask != nullptr && vocab_mask[word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // PrefixVocabMaskLogitsProcessor int batch_id = batch_beam_index / num_beams; if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // MinLengthLogitsProcessor if (word_id == demote_token_id) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); } // PresencePenaltyLogitsProcessor if (presence_mask != nullptr && presence_mask[index] == 1) { float score = (float)next_token_scores[index] - presence_penalty; next_token_scores[index] = (T)score; } // TemperatureLogitsProcessor if (temperature != 1.0f) { float score = (float)(next_token_scores[index]); next_token_scores[index] = (T)(score / temperature); } } } } template <typename T> void LaunchLogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream) { int total_elements = batch_size * num_beams * padded_vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( LogitsProcessKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, next_token_scores, vocab_mask, prefix_vocab_mask, presence_mask, presence_penalty, temperature, num_beams, vocab_size, padded_vocab_size, total_elements, demote_token_id, sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size); } // Instantiation template void LaunchLogitsProcessKernel( float* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream); template void LaunchLogitsProcessKernel( half* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream); __global__ void AddProbsKernel(float* log_probs, float* cum_log_probs, const int vocab_size, const int total_elements) { 
int index = blockIdx.x * blockDim.x + threadIdx.x; int batch_beam_index = index / vocab_size; if (index < total_elements) log_probs[index] += cum_log_probs[batch_beam_index]; } template <typename T> void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream) { int total_elements = batch_size * num_beams * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( AddProbsKernel), dim3(gridSize), dim3(blockSize), 0, stream, log_probs, cum_log_probs, vocab_size, total_elements); } template void LaunchAddProbsKernel( float* log_probs, float* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream); template <typename T> __global__ void UpdateGptInputsKernel(const T* old_mask_data, T* mask_data, int32_t* next_positions, int batch_beam_size, int current_length) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < batch_beam_size * current_length) { // Update attention mask. int i = index / current_length; int j = index % current_length; mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1); if (next_positions != nullptr) { // Update sequence length (or next positions). if (index < batch_beam_size) { next_positions[index]++; } } } } void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, hipStream_t stream) { assert(current_length > 0); int total_elements = batch_beam_size * current_length; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( UpdateGptInputsKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, stream, old_mask_data, mask_data, next_positions, batch_beam_size, current_length); } template <typename T> void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes) { if (is_descending) { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void GetTempStorageSize( const float* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes); template void GetTempStorageSize( const half* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes); // TODO: merge to one kernel __global__ void SetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int total_elements = batch_size * vocab_size; if (index < total_elements) { d_values_in[index] = index % vocab_size; } if (index < batch_size + 1) { d_offsets[index] = index * vocab_size; } } void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, hipStream_t stream) { int total_elements = batch_size * 
vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; hipLaunchKernelGGL(( SetupParamsKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_values_in, d_offsets, batch_size, vocab_size); } template <typename T> void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending) { if (is_descending) { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(hipcub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const float* d_keys_in, float* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending); template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const half* d_keys_in, half* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending); // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. struct BlockPrefixCallbackOp { float running_total; // running prefix __device__ BlockPrefixCallbackOp(float running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ float operator()(float block_aggregate) { float old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; template <typename T, int kBlockSize> __global__ void FilterLogitsKernelCustom(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef hipcub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).ExclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum >= top_p_threshold) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } template <typename T, int kBlockSize> __global__ void FilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef hipcub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).InclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum <= top_p_threshold) { if (idx + min_tokens_to_keep < vocab_size) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } } template <typename T> void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending) { constexpr int kBlockSize = 256; if (is_descending) { hipLaunchKernelGGL(( FilterLogitsKernelCustom<T, kBlockSize>), dim3(batch_size), dim3(kBlockSize), 0, stream, d_sorted_logits_in, d_sorted_indices, d_logits_in_out, top_p, filter_value, batch_size, vocab_size); } else { hipLaunchKernelGGL(( FilterLogitsKernel<T, kBlockSize>), dim3(batch_size), dim3(kBlockSize), 0, stream, d_sorted_logits_in, d_sorted_indices, d_logits_in_out, 1 - top_p, filter_value, min_tokens_to_keep, batch_size, vocab_size); } } template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, float* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending); template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, half* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending); // Ref: https://github.com/pytorch/pytorch/blob/release/1.13/aten/src/ATen/native/cuda/MultinomialKernel.cu template <typename scalar_t, typename accscalar_t> __global__ void sampleMultinomialOnce(int32_t* dest, int distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories, // dist->stride(1) int* d_presence_mask) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; __shared__ 
unsigned foundPos; accscalar_t* smem = reinterpret_cast<accscalar_t*>(my_smem); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Assume sum = 1 in Top P sampling as the input is softmaxed. accscalar_t sum = 1; // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow // CUDA_KERNEL_ASSERT(!_isinf(val)); // CUDA_KERNEL_ASSERT(sum > accZero); foundPos = 0; smem[0] = sum; smem[1] = sampled[curDist]; } __syncthreads(); sum = smem[0]; scalar_t sample = static_cast<scalar_t>(smem[1]); __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; accscalar_t prevHighProb = accZero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { accscalar_t val = accZero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its bucket scalar_t curBucket = static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb); scalar_t prevBucket = static_cast<scalar_t>( threadIdx.x == 0 ? prevHighProb : smem[threadIdx.x - 1] + prevHighProb); bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based atomicMax(&foundPos, cat); found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0) { if (found) { dest[curDist] = foundPos; } else { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. 
for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } // update presence mask int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= distributions * categories) { return; } int dist_idx = index / categories; int cat_idx = index % categories; if (dest[dist_idx] == cat_idx) { d_presence_mask[index] = 1; } } // Only support n_sample = 1 void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, hipStream_t stream) { // Store the props in class variables int device; CUDA_CALL_THROW(hipGetDevice(&device)); hipDeviceProp_t props; CUDA_CALL_THROW(hipGetDeviceProperties(&props, device)); int numSM = props.multiProcessorCount; int maxThreads = props.maxThreadsPerBlock; int warp_size = 32; // at::cuda::warp_size(); int requiredWarps = (vocab_size + warp_size - 1) / warp_size; int requiredThreads = ::min(maxThreads, requiredWarps * warp_size); int requiredShared = requiredThreads * sizeof(float); dim3 block(requiredThreads); dim3 grid(::min(batch_size, numSM * 4)); hipLaunchKernelGGL(( sampleMultinomialOnce<float, float>) , dim3(grid), dim3(block), requiredShared, stream, d_output, batch_size, vocab_size, d_sampled, d_input, vocab_size, 1, d_presence_mask); } __global__ void UpdateDecoderMaskedMultiheadAttentionCacheIndirectionKernel(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length) { int time_step = threadIdx.x + blockIdx.x * blockDim.x; int bb_id = threadIdx.y + blockIdx.y * blockDim.y; const int batch_id = bb_id / beam_width; const int beam_id = bb_id % beam_width; if (bb_id >= beam_width * batch_size || time_step >= current_length) { return; } const int src_beam = beam_ids[batch_id * beam_width + beam_id] % beam_width; const int tgt_offset = batch_id * beam_width * max_seq_length + beam_id * max_seq_length + time_step; if (time_step < input_seq_length) { // For time steps that correspond to the input sequence, // the beam that it comes from is always 0. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(0); } else if (time_step == (current_length - 1)) { // For the final (newly generated) time step, // the beam that it comes from is always the beam that we // are currently processing (i.e.) from this point on, these time-steps // form the new beams. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(beam_id); } else { // For all other time-steps, we look up the source indirection, to // see which beam it came from based on the `src_beam`. const int src_offset = batch_id * beam_width * max_seq_length + src_beam * max_seq_length + time_step; tgt_indir_cache[tgt_offset] = src_indir_cache[src_offset]; } } void UpdateDecoderMaskedMultiheadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, hipStream_t stream) { const dim3 block(32); const dim3 grid((current_length + block.x - 1) / block.x, batch_size * beam_width); hipLaunchKernelGGL(( UpdateDecoderMaskedMultiheadAttentionCacheIndirectionKernel), dim3(grid), dim3(block), 0, stream, tgt_indir_cache, src_indir_cache, beam_ids, batch_size, beam_width, input_seq_length, max_seq_length, current_length); } } // namespace cuda } // namespace contrib } // namespace onnxruntime
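The top-p path in this file is split across a segmented radix sort, a cub::BlockScan prefix sum, and two filter kernels, which hides a fairly simple rule. The host-only sketch below mirrors only the descending-sort branch (FilterLogitsKernelCustom): after sorting one row of already-softmaxed probabilities, a token is demoted to filter_value once the exclusive cumulative probability ahead of it has reached top_p. The top_p_filter helper handles a single row and ignores min_tokens_to_keep; it is a reference for the logic, not the ONNX Runtime kernel.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// Keep the smallest set of highest-probability tokens whose mass reaches top_p;
// everything else is overwritten with filter_value (e.g. a very negative logit).
void top_p_filter(std::vector<float>& probs, float top_p, float filter_value) {
    const int n = static_cast<int>(probs.size());
    std::vector<int> order(n);
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(),
              [&](int a, int b) { return probs[a] > probs[b]; });   // descending by probability

    float running = 0.f;                       // exclusive prefix sum over the sorted order
    for (int rank = 0; rank < n; ++rank) {
        const int idx = order[rank];
        const float p = probs[idx];
        if (running >= top_p)                  // the mass ahead of this token already covers top_p
            probs[idx] = filter_value;
        running += p;                          // add after the test, i.e. an exclusive scan
    }
}

int main() {
    std::vector<float> probs = {0.02f, 0.50f, 0.03f, 0.30f, 0.10f, 0.05f};
    top_p_filter(probs, 0.75f, -1e9f);
    for (float p : probs) printf("%g ", p);    // only 0.5 and 0.3 survive
    printf("\n");
    return 0;
}

With top_p = 0.75 only the 0.5 and 0.3 tokens survive; on the GPU the same exclusive-sum comparison is evaluated in parallel by cub::BlockScan over each sorted row.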
55d68894140edf5d2a7f3fd7cb4f49769401dce1.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "cub/util_type.cuh" #include <cub/cub.cuh> #include <cub/device/device_segmented_radix_sort.cuh> #include "contrib_ops/cuda/transformers/generation_cuda_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { __global__ void InitKernel(float* beam_scores, int num_beams, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int beam_index = index % num_beams; beam_scores[index] = beam_index > 0 ? static_cast<float>(-1e9) : 0.0f; } } void LaunchInitKernel( float* beam_scores, int batch_size, int num_beams, cudaStream_t stream) { int total_elements = batch_size * num_beams; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; InitKernel<<<gridSize, blockSize, 0, stream>>>(beam_scores, num_beams, total_elements); } __global__ void NextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int vocab_size, int total_elements) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { next_indices[index] = next_token_indices[index] / vocab_size; next_tokens[index] = next_token_indices[index] % vocab_size; } } void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, cudaStream_t stream) { int total_elements = batch_size * top_k; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; NextTokenKernel<<<gridSize, blockSize, 0, stream>>>(next_token_indices, next_indices, next_tokens, vocab_size, total_elements); } template <typename T> __global__ void LogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, const int* presence_mask, float presence_penalty, float temperature, int num_beams, int vocab_size, int padded_vocab_size, int total_elements, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < total_elements) { int batch_beam_index = index / padded_vocab_size; int word_id = index % padded_vocab_size; if (word_id >= vocab_size) { // Set any value within the padding region to the lowest value so that it isn't picked next_token_scores[index] = cub::FpLimits<T>::Lowest(); } else { // RepetitionPenaltyLogitsProcessor if (repetition_penalty != 1.0f) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = 0; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { found = true; break; } } if (found) { float score = (float)next_token_scores[index]; next_token_scores[index] = (T)(score < 0 ? 
score * repetition_penalty : score / repetition_penalty); } } // NoRepeatNGramLogitsProcessor if (no_repeat_ngram_size > 0 && current_sequence_length >= no_repeat_ngram_size) { int32_t* current_sequence = sequences + batch_beam_index * max_sequence_length; bool found = false; for (int i = no_repeat_ngram_size - 1; i < current_sequence_length; i++) { if (current_sequence[i] == word_id) { // last token of n-gram matched found = true; for (int j = 0; j < no_repeat_ngram_size - 1; j++) { // match the remaining N-1 tokens if (current_sequence[i - j - 1] != current_sequence[current_sequence_length - 1 - j]) { found = false; break; } } if (found) { break; } } } if (found) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } } // VocabMaskLogitsProcessor if (vocab_mask != nullptr && vocab_mask[word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // PrefixVocabMaskLogitsProcessor int batch_id = batch_beam_index / num_beams; if (prefix_vocab_mask != nullptr && prefix_vocab_mask[batch_id * vocab_size + word_id] == 0) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); return; } // MinLengthLogitsProcessor if (word_id == demote_token_id) { next_token_scores[index] = cub::FpLimits<T>::Lowest(); } // PresencePenaltyLogitsProcessor if (presence_mask != nullptr && presence_mask[index] == 1) { float score = (float)next_token_scores[index] - presence_penalty; next_token_scores[index] = (T)score; } // TemperatureLogitsProcessor if (temperature != 1.0f) { float score = (float)(next_token_scores[index]); next_token_scores[index] = (T)(score / temperature); } } } } template <typename T> void LaunchLogitsProcessKernel( T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream) { int total_elements = batch_size * num_beams * padded_vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; LogitsProcessKernel<T><<<gridSize, blockSize, 0, stream>>>( next_token_scores, vocab_mask, prefix_vocab_mask, presence_mask, presence_penalty, temperature, num_beams, vocab_size, padded_vocab_size, total_elements, demote_token_id, sequences, max_sequence_length, current_sequence_length, repetition_penalty, no_repeat_ngram_size); } // Instantiation template void LaunchLogitsProcessKernel( float* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream); template void LaunchLogitsProcessKernel( half* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream); __global__ void AddProbsKernel(float* log_probs, float* cum_log_probs, const int vocab_size, const int total_elements) { int index = blockIdx.x * 
blockDim.x + threadIdx.x; int batch_beam_index = index / vocab_size; if (index < total_elements) log_probs[index] += cum_log_probs[batch_beam_index]; } template <typename T> void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream) { int total_elements = batch_size * num_beams * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; AddProbsKernel<<<gridSize, blockSize, 0, stream>>>(log_probs, cum_log_probs, vocab_size, total_elements); } template void LaunchAddProbsKernel( float* log_probs, float* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream); template <typename T> __global__ void UpdateGptInputsKernel(const T* old_mask_data, T* mask_data, int32_t* next_positions, int batch_beam_size, int current_length) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < batch_beam_size * current_length) { // Update attention mask. int i = index / current_length; int j = index % current_length; mask_data[index] = (j < current_length - 1) ? old_mask_data[i * (current_length - 1) + j] : static_cast<T>(1); if (next_positions != nullptr) { // Update sequence length (or next positions). if (index < batch_beam_size) { next_positions[index]++; } } } } void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, cudaStream_t stream) { assert(current_length > 0); int total_elements = batch_beam_size * current_length; constexpr int blockSize = 256; const int gridSize = (total_elements + blockSize - 1) / blockSize; UpdateGptInputsKernel<int32_t><<<gridSize, blockSize, 0, stream>>>( old_mask_data, mask_data, next_positions, batch_beam_size, current_length); } template <typename T> void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes) { if (is_descending) { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, d_keys_in, (T*)nullptr, d_values_in, (int*)nullptr, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void GetTempStorageSize( const float* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes); template void GetTempStorageSize( const half* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes); // TODO: merge to one kernel __global__ void SetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int total_elements = batch_size * vocab_size; if (index < total_elements) { d_values_in[index] = index % vocab_size; } if (index < batch_size + 1) { d_offsets[index] = index * vocab_size; } } void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, cudaStream_t stream) { int total_elements = batch_size * vocab_size; constexpr int blockSize = 256; const int gridSize = (total_elements + 
blockSize - 1) / blockSize; SetupParamsKernel<<<gridSize, blockSize, 0, stream>>>(d_values_in, d_offsets, batch_size, vocab_size); } template <typename T> void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending) { if (is_descending) { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } else { CUDA_CALL_THROW(cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, num_items, num_segments, d_offsets, d_offsets + 1, 0, sizeof(T) * 8, stream)); } } template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const float* d_keys_in, float* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending); template void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const half* d_keys_in, half* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending); // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. struct BlockPrefixCallbackOp { float running_total; // running prefix __device__ BlockPrefixCallbackOp(float running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ float operator()(float block_aggregate) { float old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; template <typename T, int kBlockSize> __global__ void FilterLogitsKernelCustom(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef cub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).ExclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum >= top_p_threshold) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } template <typename T, int kBlockSize> __global__ void FilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p_threshold, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size) { int vocab_idx = threadIdx.x; int batch_id = blockIdx.x; int offset = batch_id * vocab_size; typedef cub::BlockScan<float, kBlockSize> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockPrefixCallbackOp prefix_op(0); for (int idx = vocab_idx; idx < vocab_size; idx += kBlockSize) { float sum = d_sorted_logits_in[offset + idx]; BlockScan(temp_storage).InclusiveSum(sum, sum, prefix_op); __syncthreads(); if (sum <= top_p_threshold) { if (idx + min_tokens_to_keep < vocab_size) { int original_index = offset + d_sorted_indices[offset + idx]; d_logits_in_out[original_index] = (T)filter_value; } } } } template <typename T> void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending) { constexpr int kBlockSize = 256; if (is_descending) { FilterLogitsKernelCustom<T, kBlockSize><<<batch_size, kBlockSize, 0, stream>>>(d_sorted_logits_in, d_sorted_indices, d_logits_in_out, top_p, filter_value, batch_size, vocab_size); } else { FilterLogitsKernel<T, kBlockSize><<<batch_size, kBlockSize, 0, stream>>>(d_sorted_logits_in, d_sorted_indices, d_logits_in_out, 1 - top_p, filter_value, min_tokens_to_keep, batch_size, vocab_size); } } template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, float* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending); template void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, half* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending); // Ref: https://github.com/pytorch/pytorch/blob/release/1.13/aten/src/ATen/native/cuda/MultinomialKernel.cu template <typename scalar_t, typename accscalar_t> __global__ void sampleMultinomialOnce(int32_t* dest, int distributions, int categories, scalar_t* sampled, scalar_t* dist, int stride_dist, // dist->stride(0) int stride_categories, // dist->stride(1) int* d_presence_mask) { extern __shared__ unsigned char my_smem[]; __shared__ bool found; __shared__ unsigned foundPos; accscalar_t* smem = 
reinterpret_cast<accscalar_t*>(my_smem); accscalar_t accZero = static_cast<accscalar_t>(0); scalar_t zero = static_cast<scalar_t>(0); for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) { // Assume sum = 1 in Top P sampling as the input is softmaxed. accscalar_t sum = 1; // Broadcast sum and sample value if (threadIdx.x == 0) { // Make sure the sum of our distribution didn't overflow // CUDA_KERNEL_ASSERT(!_isinf(val)); // CUDA_KERNEL_ASSERT(sum > accZero); foundPos = 0; smem[0] = sum; smem[1] = sampled[curDist]; } __syncthreads(); sum = smem[0]; scalar_t sample = static_cast<scalar_t>(smem[1]); __syncthreads(); if (sum == accZero) { // Choose the first element if (threadIdx.x == 0) { dest[curDist] = 0; } continue; } int chunks = (categories + (int)blockDim.x - 1) / blockDim.x; accscalar_t prevHighProb = accZero; found = false; for (int chunk = 0; chunk < chunks && !found; ++chunk) { // All threads in bounds load a value int cat = chunk * blockDim.x + threadIdx.x; accscalar_t dist_val = cat < categories ? static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum : accZero; smem[threadIdx.x] = dist_val; __syncthreads(); // Perform an inclusive prefix sum of the shared memory contents for (int offset = 1; offset < blockDim.x; offset *= 2) { accscalar_t val = accZero; if (threadIdx.x >= offset) { val = smem[threadIdx.x - offset] + smem[threadIdx.x]; } __syncthreads(); if (threadIdx.x >= offset) { smem[threadIdx.x] = val; } __syncthreads(); } // Each thread will check to see if the sample falls in its bucket scalar_t curBucket = static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb); scalar_t prevBucket = static_cast<scalar_t>( threadIdx.x == 0 ? prevHighProb : smem[threadIdx.x - 1] + prevHighProb); bool inBucket = (cat < categories) && (!(sample >= curBucket) && (sample >= prevBucket) && (dist_val > zero)); if (inBucket) { // We're done; we have the sample // Torch indices are 1-based atomicMax(&foundPos, cat); found = true; } // Store the previous scan's high value for future use prevHighProb = prevHighProb + smem[blockDim.x - 1]; __syncthreads(); } if (threadIdx.x == 0) { if (found) { dest[curDist] = foundPos; } else { // This should address a rare bug where we don't select a valid index. This likely occurs when // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but // and our uniform sample is greater than this value. In this case we likely have unitialized memory // in dest[curDist]. So basically we will loop through the distribution and pick the largest index // where the distribution is non-zero. This is obviously terribly inefficient, but due to the // rarity in which this occurs, this should not be an issue. 
for (int cat = categories - 1; cat >= 0; --cat) { if (dist[curDist * stride_dist + cat * stride_categories] > zero) { dest[curDist] = cat; break; } } } } } // update presence mask int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= distributions * categories) { return; } int dist_idx = index / categories; int cat_idx = index % categories; if (dest[dist_idx] == cat_idx) { d_presence_mask[index] = 1; } } // Only support n_sample = 1 void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, cudaStream_t stream) { // Store the props in class variables int device; CUDA_CALL_THROW(cudaGetDevice(&device)); cudaDeviceProp props; CUDA_CALL_THROW(cudaGetDeviceProperties(&props, device)); int numSM = props.multiProcessorCount; int maxThreads = props.maxThreadsPerBlock; int warp_size = 32; // at::cuda::warp_size(); int requiredWarps = (vocab_size + warp_size - 1) / warp_size; int requiredThreads = std::min(maxThreads, requiredWarps * warp_size); int requiredShared = requiredThreads * sizeof(float); dim3 block(requiredThreads); dim3 grid(std::min(batch_size, numSM * 4)); sampleMultinomialOnce<float, float> <<<grid, block, requiredShared, stream>>>(d_output, batch_size, vocab_size, d_sampled, d_input, vocab_size, 1, d_presence_mask); } __global__ void UpdateDecoderMaskedMultiheadAttentionCacheIndirectionKernel(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length) { int time_step = threadIdx.x + blockIdx.x * blockDim.x; int bb_id = threadIdx.y + blockIdx.y * blockDim.y; const int batch_id = bb_id / beam_width; const int beam_id = bb_id % beam_width; if (bb_id >= beam_width * batch_size || time_step >= current_length) { return; } const int src_beam = beam_ids[batch_id * beam_width + beam_id] % beam_width; const int tgt_offset = batch_id * beam_width * max_seq_length + beam_id * max_seq_length + time_step; if (time_step < input_seq_length) { // For time steps that correspond to the input sequence, // the beam that it comes from is always 0. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(0); } else if (time_step == (current_length - 1)) { // For the final (newly generated) time step, // the beam that it comes from is always the beam that we // are currently processing (i.e.) from this point on, these time-steps // form the new beams. tgt_indir_cache[tgt_offset] = static_cast<int32_t>(beam_id); } else { // For all other time-steps, we look up the source indirection, to // see which beam it came from based on the `src_beam`. const int src_offset = batch_id * beam_width * max_seq_length + src_beam * max_seq_length + time_step; tgt_indir_cache[tgt_offset] = src_indir_cache[src_offset]; } } void UpdateDecoderMaskedMultiheadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, cudaStream_t stream) { const dim3 block(32); const dim3 grid((current_length + block.x - 1) / block.x, batch_size * beam_width); UpdateDecoderMaskedMultiheadAttentionCacheIndirectionKernel<<<grid, block, 0, stream>>>(tgt_indir_cache, src_indir_cache, beam_ids, batch_size, beam_width, input_seq_length, max_seq_length, current_length); } } // namespace cuda } // namespace contrib } // namespace onnxruntime
decae6972e8e58ce01f5e713a991904a90c35a4c.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include "THH/THH.h" #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include <math.h> #include <vector> #include <stdio.h> #include "utils.cu" __device__ void rfa_forward_step( const __half * __restrict__ q_local, const __half * __restrict__ k_local, const __half * __restrict__ v_local, __half * __restrict__ attn_local, __half2 s1[4][HALF2_PER_THREAD], __half2 s2[4][HALF2_PER_THREAD], __half2 z[HALF2_PER_THREAD], int num_threads_per_head_dim) { __half2 qs1[4] = {__float2half2_rn(0.f)}; __half2 qz = __float2half2_rn(0.f); __half2 q[HALF2_PER_THREAD] = { __float2half2_rn(0.f)}; __half2 k[HALF2_PER_THREAD] = { __float2half2_rn(0.f)}; __half2 v1[4] = { __float2half2_rn(0.f)}; read_int4(q_local, q, INT4_PER_THREAD); read_int4(k_local, k, INT4_PER_THREAD); read_int4(v_local, v1, 1); __half2 v2[4] = { __lowhigh2highlow(v1[0]), __lowhigh2highlow(v1[1]), __lowhigh2highlow(v1[2]), __lowhigh2highlow(v1[3]), }; #pragma unroll for (int i = 0;i < 4; ++ i) { qs1[i] = __float2half2_rn(0.f); __half2 qs2 = __float2half2_rn(0.f); #pragma unroll for (int j = 0;j < HALF2_PER_THREAD; ++ j) { s1[i][j] = __hfma2(v1[i], k[j], s1[i][j]); s2[i][j] = __hfma2(v2[i], k[j], s2[i][j]); qs1[i] = __hfma2(s1[i][j], q[j], qs1[i]); qs2 = __hfma2(s2[i][j], q[j], qs2); } qs1[i] = __hadd2(qs1[i], __lowhigh2highlow(qs2)); } #pragma unroll for (int j = 0; j < HALF2_PER_THREAD; ++ j) { z[j] = __hadd2(k[j], z[j]); qz = __hfma2(z[j], q[j], qz); } #pragma unroll for (int offset = num_threads_per_head_dim >> 1; offset > 0; offset >>= 1) { qz = __hadd2(qz, __shfl_down_sync(FULL_MASK, qz, offset)); #pragma unroll for (int i = 0; i < 4; ++ i) { qs1[i] = __hadd2( qs1[i], __shfl_down_sync(FULL_MASK, qs1[i], offset)); } } __half qz_half = __hadd(qz.x, qz.y); qz_half = clamp_eps(qz_half); qz = __half2half2(qz_half); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < 4; ++ i) { qs1[i] = __h2div(qs1[i], qz); } *((int4 *) attn_local) = *((int4 *) qs1); } } __global__ void rfa_forward( const __half * __restrict__ q, const __half * __restrict__ k, const __half * __restrict__ v, __half * __restrict__ attn, int tgt_len, int head_dim, int proj_dim, int num_threads_per_head_dim, int qk_inc_t, int v_inc_t) { const int bid = blockIdx.x; const int head_dim_offset = threadIdx.y << 3; const int proj_dim_offset = threadIdx.x * DIM_PER_THREAD; const __half * __restrict__ q_local = q + bid * proj_dim + proj_dim_offset; const __half * __restrict__ k_local = k + bid * proj_dim + proj_dim_offset; const __half * __restrict__ v_local = v + bid * head_dim + head_dim_offset; __half * __restrict__ attn_local = attn + bid * head_dim + head_dim_offset; __half2 s1[4][HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; __half2 s2[4][HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; __half2 z[HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; for (int t = 0; t < tgt_len; ++ t) { rfa_forward_step( q_local, k_local, v_local, attn_local, s1, s2, z, num_threads_per_head_dim ); q_local += qk_inc_t; k_local += qk_inc_t; v_local += v_inc_t; attn_local += v_inc_t; } } Tensor RFAForward( Tensor const& q, Tensor const& k, Tensor const& v) { /* Args: q: [tgt_len, bsz, proj_dim] k: [tgt_len, bsz, proj_dim] v: [tgt_len, bsz, head_dim] Return: attn: [tgt_len, bsz, head_dim] */ // column major const int tgt_len = q.size(0); const int bsz = q.size(1); 
const int proj_dim = q.size(2); const int head_dim = v.size(2); const int qk_inc_t = bsz * proj_dim; const int v_inc_t = bsz * head_dim; auto act_options = q.options().requires_grad(false); Tensor attn = torch::zeros({tgt_len, bsz, head_dim}, act_options); int num_threads_per_head_dim = proj_dim / DIM_PER_THREAD; dim3 dim_grid(bsz); dim3 dim_block(num_threads_per_head_dim, head_dim >> 3); hipLaunchKernelGGL(( rfa_forward) , dim3(dim_grid), dim3(dim_block), 0, 0, static_cast<const __half *> (q.data_ptr()), static_cast<const __half *> (k.data_ptr()), static_cast<const __half *> (v.data_ptr()), static_cast<__half *> (attn.data_ptr()), tgt_len, head_dim, proj_dim, num_threads_per_head_dim, qk_inc_t, v_inc_t ); return attn; }
decae6972e8e58ce01f5e713a991904a90c35a4c.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include "THC/THC.h" #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include <math.h> #include <vector> #include <stdio.h> #include "utils.cu" __device__ void rfa_forward_step( const __half * __restrict__ q_local, const __half * __restrict__ k_local, const __half * __restrict__ v_local, __half * __restrict__ attn_local, __half2 s1[4][HALF2_PER_THREAD], __half2 s2[4][HALF2_PER_THREAD], __half2 z[HALF2_PER_THREAD], int num_threads_per_head_dim) { __half2 qs1[4] = {__float2half2_rn(0.f)}; __half2 qz = __float2half2_rn(0.f); __half2 q[HALF2_PER_THREAD] = { __float2half2_rn(0.f)}; __half2 k[HALF2_PER_THREAD] = { __float2half2_rn(0.f)}; __half2 v1[4] = { __float2half2_rn(0.f)}; read_int4(q_local, q, INT4_PER_THREAD); read_int4(k_local, k, INT4_PER_THREAD); read_int4(v_local, v1, 1); __half2 v2[4] = { __lowhigh2highlow(v1[0]), __lowhigh2highlow(v1[1]), __lowhigh2highlow(v1[2]), __lowhigh2highlow(v1[3]), }; #pragma unroll for (int i = 0;i < 4; ++ i) { qs1[i] = __float2half2_rn(0.f); __half2 qs2 = __float2half2_rn(0.f); #pragma unroll for (int j = 0;j < HALF2_PER_THREAD; ++ j) { s1[i][j] = __hfma2(v1[i], k[j], s1[i][j]); s2[i][j] = __hfma2(v2[i], k[j], s2[i][j]); qs1[i] = __hfma2(s1[i][j], q[j], qs1[i]); qs2 = __hfma2(s2[i][j], q[j], qs2); } qs1[i] = __hadd2(qs1[i], __lowhigh2highlow(qs2)); } #pragma unroll for (int j = 0; j < HALF2_PER_THREAD; ++ j) { z[j] = __hadd2(k[j], z[j]); qz = __hfma2(z[j], q[j], qz); } #pragma unroll for (int offset = num_threads_per_head_dim >> 1; offset > 0; offset >>= 1) { qz = __hadd2(qz, __shfl_down_sync(FULL_MASK, qz, offset)); #pragma unroll for (int i = 0; i < 4; ++ i) { qs1[i] = __hadd2( qs1[i], __shfl_down_sync(FULL_MASK, qs1[i], offset)); } } __half qz_half = __hadd(qz.x, qz.y); qz_half = clamp_eps(qz_half); qz = __half2half2(qz_half); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < 4; ++ i) { qs1[i] = __h2div(qs1[i], qz); } *((int4 *) attn_local) = *((int4 *) qs1); } } __global__ void rfa_forward( const __half * __restrict__ q, const __half * __restrict__ k, const __half * __restrict__ v, __half * __restrict__ attn, int tgt_len, int head_dim, int proj_dim, int num_threads_per_head_dim, int qk_inc_t, int v_inc_t) { const int bid = blockIdx.x; const int head_dim_offset = threadIdx.y << 3; const int proj_dim_offset = threadIdx.x * DIM_PER_THREAD; const __half * __restrict__ q_local = q + bid * proj_dim + proj_dim_offset; const __half * __restrict__ k_local = k + bid * proj_dim + proj_dim_offset; const __half * __restrict__ v_local = v + bid * head_dim + head_dim_offset; __half * __restrict__ attn_local = attn + bid * head_dim + head_dim_offset; __half2 s1[4][HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; __half2 s2[4][HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; __half2 z[HALF2_PER_THREAD] = {__float2half2_rn(0.f)}; for (int t = 0; t < tgt_len; ++ t) { rfa_forward_step( q_local, k_local, v_local, attn_local, s1, s2, z, num_threads_per_head_dim ); q_local += qk_inc_t; k_local += qk_inc_t; v_local += v_inc_t; attn_local += v_inc_t; } } Tensor RFAForward( Tensor const& q, Tensor const& k, Tensor const& v) { /* Args: q: [tgt_len, bsz, proj_dim] k: [tgt_len, bsz, proj_dim] v: [tgt_len, bsz, head_dim] Return: attn: [tgt_len, bsz, head_dim] */ // column major const int tgt_len = q.size(0); const int bsz = q.size(1); const int proj_dim = q.size(2); const int head_dim = v.size(2); const int 
qk_inc_t = bsz * proj_dim; const int v_inc_t = bsz * head_dim; auto act_options = q.options().requires_grad(false); Tensor attn = torch::zeros({tgt_len, bsz, head_dim}, act_options); int num_threads_per_head_dim = proj_dim / DIM_PER_THREAD; dim3 dim_grid(bsz); dim3 dim_block(num_threads_per_head_dim, head_dim >> 3); rfa_forward <<<dim_grid, dim_block>>>( static_cast<const __half *> (q.data_ptr()), static_cast<const __half *> (k.data_ptr()), static_cast<const __half *> (v.data_ptr()), static_cast<__half *> (attn.data_ptr()), tgt_len, head_dim, proj_dim, num_threads_per_head_dim, qk_inc_t, v_inc_t ); return attn; }
db6d0da848ebad9d4103d574966908064d0a4c13.hip
// !!! This is a file automatically generated by hipify!!! /******************************** Author: Sravanthi Kota Venkata ********************************/ #include "tracking.h" // LVA for interacting with pin #ifdef APPROXIMATE extern void LVA_FUNCTION(int type,void* start, void* end, int self) __attribute__ ((noinline)); extern void LVA_FUNCTION_RM(int type, void* start,void*end, int self) __attribute__ ((noinline)); extern void LVA_FUNCTION(int type, void* start, void* end, int self) { __asm__ __volatile__ ("xchg %dx,%dx"); } extern void LVA_FUNCTION_RM(int type,void* start,void* end, int self) { __asm__ __volatile__ ("xchg %dx,%dx"); } #endif #define LVA_BX_INSTRUCTION __asm__ __volatile__ ("xchg %bx,%bx"); int main(int argc, char* argv[]) { int i, j, k, N_FEA, WINSZ, LK_ITER, rows, cols; int endR, endC; F2D *blurredImage, *previousFrameBlurred_level1, *previousFrameBlurred_level2, *blurred_level1, *blurred_level2; F2D *verticalEdgeImage, *horizontalEdgeImage, *verticalEdge_level1, *verticalEdge_level2, *horizontalEdge_level1, *horizontalEdge_level2, *interestPnt; F2D *lambda, *lambdaTemp, *features; I2D *Ic, *status; float SUPPRESION_RADIUS; F2D *newpoints; int numFind, m, n; F2D *np_temp; unsigned int* start, *end, *elapsed, *elt; char im1[100]; int counter=2; float accuracy = 0.03; int count; if(argc < 2) { printf("We need input image path\n"); return -1; } sprintf(im1, "%s/1.bmp", argv[1]); N_FEA = 1600; WINSZ = 4; SUPPRESION_RADIUS = 10.0; LK_ITER = 20; #ifdef test WINSZ = 2; N_FEA = 100; LK_ITER = 2; counter = 2; accuracy = 0.1; #endif #ifdef sim_fast WINSZ = 2; N_FEA = 100; LK_ITER = 2; counter = 4; #endif #ifdef sim WINSZ = 2; N_FEA = 200; LK_ITER = 2; counter = 4; #endif #ifdef sqcif WINSZ = 8; N_FEA = 500; LK_ITER = 15; counter = 2; #endif #ifdef qcif WINSZ = 12; N_FEA = 400; LK_ITER = 15; counter = 4; #endif #ifdef cif WINSZ = 20; N_FEA = 500; LK_ITER = 20; counter = 4; #endif #ifdef vga WINSZ = 32; N_FEA = 400; LK_ITER = 20; counter = 4; #endif #ifdef wuxga WINSZ = 64; N_FEA = 500; LK_ITER = 20; counter = 4; #endif #ifdef fullhd WINSZ = 48; N_FEA = 500; LK_ITER = 20; counter = 4; #endif /** Read input image **/ Ic = readImage(im1); rows = Ic->height; cols = Ic->width; /* Other frames */ #define MAX_COUNTER (4) I2D *Ics[MAX_COUNTER]; ImagePyramid* newFramePyramids[MAX_COUNTER]; hipStream_t frameStreams[MAX_COUNTER]; for(count=1; count<=counter; count++) { /** Read image **/ sprintf(im1, "%s/%d.bmp", argv[1], count); Ics[count-1] = readImage(im1); } //start roi LVA_BX_INSTRUCTION; LVA_BX_INSTRUCTION; hipDeviceReset(); printf("Input size\t\t- (%dx%d)\n", rows, cols); /** Start Timing **/ start = photonStartTiming(); /** IMAGE PRE-PROCESSING **/ /** Blur the image to remove noise - weighted avergae filter **/ /* MARK: Added code to create all bluured level images in parallel */ hipStream_t frameStream; hipStreamCreate(&frameStream); //printf("Before calling createImgPyramid...\n"); ImagePyramid* preprocessed = createImgPyramid(Ic, frameStream); // just need to define a struct to return 4 float* arrays //printf("After calling createImgPyramid...\n"); /** Fire off streams for other frames **/ for(count=1; count<=counter; count++) { hipStreamCreate(&frameStreams[count-1]); newFramePyramids[count-1] = createImgPyramid(Ics[count-1],frameStreams[count-1]); } hipStreamSynchronize(frameStream); hipStreamDestroy(frameStream); destroyImgPyramid(Ic, preprocessed); blurredImage = preprocessed->blurredImg; /** Scale down the image to build Image Pyramid. 
We find features across all scales of the image **/ blurred_level1 = blurredImage; /** Scale 0 **/ blurred_level2 = preprocessed->resizedImg; /** Scale 1 **/ /* TwoStepKernel* cpu_blur_ret = imageBlur(Ic); TwoStepKernel* cpu_resize_ret = imageResize(cpu_blur_ret->final); blurred_level2 = cpu_resize_ret->final; F2D* blurredImageCPU = cpu_blur_ret->final; F2D* intBlurCPU = cpu_blur_ret->intermediate; F2D* gpuResizeInt = preprocessed->horizEdge; F2D* cpuResizeInt = cpu_resize_ret->intermediate; */ //ImagePyramid* preprocessed = createImgPyramid(Ic); // just need to define a struct to return 4 float* arrays blurredImage = imageBlur(Ic); //blurredImage = preprocessed->blurredImg; /** Scale down the image to build Image Pyramid. We find features across all scales of the image **/ blurred_level1 = blurredImage; /** Scale 0 **/ //blurred_level2 = preprocessed->resizedImg; /** Scale 1 **/ blurred_level2 = imageResize(blurredImage); //F2D* cpu_level2 = imageResize(blurredImage); /*for(int i = 0 ;i < 100;i++) { printf("Element # %d of GPU resize: %0.4f\n",i,blurred_level2->data[i]); printf("Element # %d of CPU resize: %0.4f\n",i,cpu_level2->data[i]); }*/ /** Edge Images - From pre-processed images, build gradient images, both horizontal and vertical **/ /* verticalEdgeImage = calcSobel_dX(blurredImage); TwoStepKernel* dyRet = calcSobel_dY(blurredImage); F2D* horizEdge_CPU = dyRet->final; F2D* horizEdge_int = dyRet->intermediate; F2D* verticalEdgeImage_GPU = preprocessed->vertEdge; F2D* horizontalEdgeImage_GPU = preprocessed->horizEdge; F2D* horizEdge_GPUint = preprocessed->tmp; */ horizontalEdgeImage = preprocessed->horizEdge; verticalEdgeImage = preprocessed->vertEdge; /* for(int i = 0 ;i < verticalEdgeImage->height * verticalEdgeImage->width ;i++) { //printf("Element # %d of GPU int: %0.8f\n",i,preprocessed->vertEdge->data[i]); //printf("Element # %d of CPU int: %0.8f\n", i,intBlurCPU->data[i]); //printf("Element # %d of GPU blur: %0.8f\n",i,blurred_level1->data[i]); //printf("Element # %d of CPU blur: %0.8f\n",i,blurredImageCPU->data[i]); //printf("Element # %d of GPU int: %0.8f\n",i,horizEdge_GPUint->data[i]); //printf("Element # %d of CPU int: %0.8f\n",i,horizEdge_int->data[i]); printf("Element # %d of GPU horiz: %0.8f\n",i,horizontalEdgeImage->data[i]); printf("Element # %d of CPU horiz: %0.8f\n",i,horizEdge_CPU->data[i]); } */ /** Edge images are used for feature detection. So, using the verticalEdgeImage and horizontalEdgeImage images, we compute feature strength across all pixels. 
Lambda matrix is the feature strength matrix returned by calcGoodFeature **/ lambda = calcGoodFeature(verticalEdgeImage, horizontalEdgeImage, verticalEdgeImage->width, verticalEdgeImage->height, WINSZ); endR = lambda->height; endC = lambda->width; lambdaTemp = fReshape(lambda, endR*endC, 1); /** We sort the lambda matrix based on the strengths **/ /** Fill features matrix with top N_FEA features **/ fFreeHandle(lambdaTemp); lambdaTemp = fillFeatures(lambda, N_FEA, WINSZ); features = fTranspose(lambdaTemp); /** Suppress features that have approximately similar strength and belong to close neighborhood **/ interestPnt = getANMS(features, SUPPRESION_RADIUS); /** Refill interestPnt in features matrix **/ fFreeHandle(features); features = fSetArray(2, interestPnt->height, 0); for(i=0; i<2; i++) { for(j=0; j<interestPnt->height; j++) { subsref(features,i,j) = subsref(interestPnt,j,i); } } fFreeHandle(verticalEdgeImage); fFreeHandle(horizontalEdgeImage); fFreeHandle(interestPnt); fFreeHandle(lambda); fFreeHandle(lambdaTemp); iFreeHandle(Ic); /** Until now, we processed base frame. The following for loop processes other frames **/ for(count=1; count<=counter; count++) { /** Read image **/ Ic = Ics[count-1]; rows = Ic->height; cols = Ic->width; hipStreamSynchronize(frameStreams[count-1]); hipStreamDestroy(frameStreams[count-1]); destroyImgPyramid(Ics[count-1], newFramePyramids[count-1]); //printf("Read image %d of dim %dx%d.\n",count,rows,cols); /* Start timing */ //start = photonStartTiming(); /** Blur image to remove noise **/ blurredImage = newFramePyramids[count-1]->blurredImg; /** MARK Added: Create the new blurred and resized image**/ //ImagePyramid* newFramePyramid = createImgPyramid(Ic); // just need to define a struct to return 4 float* arrays /** Blur image to remove noise **/ previousFrameBlurred_level1 = fDeepCopy(blurred_level1); previousFrameBlurred_level2 = fDeepCopy(blurred_level2); fFreeHandle(blurred_level1); fFreeHandle(blurred_level2); /** Image pyramid **/ blurred_level1 = blurredImage; blurred_level2 = newFramePyramids[count-1]->resizedImg; /** Gradient image computation, for all scales **/ /* verticalEdge_level1 = calcSobel_dX(blurred_level1); TwoStepKernel* r1 = calcSobel_dY(blurred_level1); horizontalEdge_level1 = r1->final; verticalEdge_level2 = calcSobel_dX(blurred_level2); r1 = calcSobel_dY(blurred_level2); horizontalEdge_level2 = r1->final; */ verticalEdge_level1 = newFramePyramids[count-1]->vertEdge; verticalEdge_level2 = newFramePyramids[count-1]->vertEdge_small; horizontalEdge_level1 = newFramePyramids[count-1]->horizEdge; horizontalEdge_level2 = newFramePyramids[count-1]->horizEdge_small; newpoints = fSetArray(2, features->width, 0); /** Based on features computed in the previous frame, find correspondence in the current frame. 
"status" returns the index of corresponding features **/ status = calcPyrLKTrack(previousFrameBlurred_level1, previousFrameBlurred_level2, verticalEdge_level1, verticalEdge_level2, horizontalEdge_level1, horizontalEdge_level2, blurred_level1, blurred_level2, features, features->width, WINSZ, accuracy, LK_ITER, newpoints); fFreeHandle(verticalEdge_level1); fFreeHandle(verticalEdge_level2); fFreeHandle(horizontalEdge_level1); fFreeHandle(horizontalEdge_level2); fFreeHandle(previousFrameBlurred_level1); fFreeHandle(previousFrameBlurred_level2); /** Populate newpoints with features that had correspondence with previous frame features **/ np_temp = fDeepCopy(newpoints); if(status->width > 0 ) { k = 0; numFind=0; for(i=0; i<status->width; i++) { if( asubsref(status,i) == 1) numFind++; } fFreeHandle(newpoints); newpoints = fSetArray(2, numFind, 0); for(i=0; i<status->width; i++) { if( asubsref(status,i) == 1) { subsref(newpoints,0,k) = subsref(np_temp,0,i); subsref(newpoints,1,k++) = subsref(np_temp,1,i); } } } iFreeHandle(status); iFreeHandle(Ic); fFreeHandle(np_temp); fFreeHandle(features); /** Populate newpoints into features **/ features = fDeepCopy(newpoints); //printf("Printing features...\n"); /*for(i = 0;i<features->height;i++) { for(j=0;j<features->width;j++) { printf("%f\t",subsref(features,i,j)); } printf("\n"); }*/ fFreeHandle(newpoints); } /* Timing utils */ end = photonEndTiming(); elapsed = photonReportTiming(start, end); free(start); free(end); //end roi LVA_BX_INSTRUCTION; #ifdef CHECK /* Self checking */ { int ret=0; float tol = 2.0; #ifdef GENERATE_OUTPUT fWriteMatrix(features, argv[1]); #endif ret = fSelfCheck(features, argv[1], tol); if (ret == -1) printf("Error in Tracking Map\n"); } #endif photonPrintTiming(elapsed); fFreeHandle(blurred_level1); fFreeHandle(blurred_level2); fFreeHandle(features); free(elapsed); return 0; }
db6d0da848ebad9d4103d574966908064d0a4c13.cu
/******************************** Author: Sravanthi Kota Venkata ********************************/ #include "tracking.h" // LVA for interacting with pin #ifdef APPROXIMATE extern void LVA_FUNCTION(int type,void* start, void* end, int self) __attribute__ ((noinline)); extern void LVA_FUNCTION_RM(int type, void* start,void*end, int self) __attribute__ ((noinline)); extern void LVA_FUNCTION(int type, void* start, void* end, int self) { __asm__ __volatile__ ("xchg %dx,%dx"); } extern void LVA_FUNCTION_RM(int type,void* start,void* end, int self) { __asm__ __volatile__ ("xchg %dx,%dx"); } #endif #define LVA_BX_INSTRUCTION __asm__ __volatile__ ("xchg %bx,%bx"); int main(int argc, char* argv[]) { int i, j, k, N_FEA, WINSZ, LK_ITER, rows, cols; int endR, endC; F2D *blurredImage, *previousFrameBlurred_level1, *previousFrameBlurred_level2, *blurred_level1, *blurred_level2; F2D *verticalEdgeImage, *horizontalEdgeImage, *verticalEdge_level1, *verticalEdge_level2, *horizontalEdge_level1, *horizontalEdge_level2, *interestPnt; F2D *lambda, *lambdaTemp, *features; I2D *Ic, *status; float SUPPRESION_RADIUS; F2D *newpoints; int numFind, m, n; F2D *np_temp; unsigned int* start, *end, *elapsed, *elt; char im1[100]; int counter=2; float accuracy = 0.03; int count; if(argc < 2) { printf("We need input image path\n"); return -1; } sprintf(im1, "%s/1.bmp", argv[1]); N_FEA = 1600; WINSZ = 4; SUPPRESION_RADIUS = 10.0; LK_ITER = 20; #ifdef test WINSZ = 2; N_FEA = 100; LK_ITER = 2; counter = 2; accuracy = 0.1; #endif #ifdef sim_fast WINSZ = 2; N_FEA = 100; LK_ITER = 2; counter = 4; #endif #ifdef sim WINSZ = 2; N_FEA = 200; LK_ITER = 2; counter = 4; #endif #ifdef sqcif WINSZ = 8; N_FEA = 500; LK_ITER = 15; counter = 2; #endif #ifdef qcif WINSZ = 12; N_FEA = 400; LK_ITER = 15; counter = 4; #endif #ifdef cif WINSZ = 20; N_FEA = 500; LK_ITER = 20; counter = 4; #endif #ifdef vga WINSZ = 32; N_FEA = 400; LK_ITER = 20; counter = 4; #endif #ifdef wuxga WINSZ = 64; N_FEA = 500; LK_ITER = 20; counter = 4; #endif #ifdef fullhd WINSZ = 48; N_FEA = 500; LK_ITER = 20; counter = 4; #endif /** Read input image **/ Ic = readImage(im1); rows = Ic->height; cols = Ic->width; /* Other frames */ #define MAX_COUNTER (4) I2D *Ics[MAX_COUNTER]; ImagePyramid* newFramePyramids[MAX_COUNTER]; cudaStream_t frameStreams[MAX_COUNTER]; for(count=1; count<=counter; count++) { /** Read image **/ sprintf(im1, "%s/%d.bmp", argv[1], count); Ics[count-1] = readImage(im1); } //start roi LVA_BX_INSTRUCTION; LVA_BX_INSTRUCTION; cudaDeviceReset(); printf("Input size\t\t- (%dx%d)\n", rows, cols); /** Start Timing **/ start = photonStartTiming(); /** IMAGE PRE-PROCESSING **/ /** Blur the image to remove noise - weighted avergae filter **/ /* MARK: Added code to create all bluured level images in parallel */ cudaStream_t frameStream; cudaStreamCreate(&frameStream); //printf("Before calling createImgPyramid...\n"); ImagePyramid* preprocessed = createImgPyramid(Ic, frameStream); // just need to define a struct to return 4 float* arrays //printf("After calling createImgPyramid...\n"); /** Fire off streams for other frames **/ for(count=1; count<=counter; count++) { cudaStreamCreate(&frameStreams[count-1]); newFramePyramids[count-1] = createImgPyramid(Ics[count-1],frameStreams[count-1]); } cudaStreamSynchronize(frameStream); cudaStreamDestroy(frameStream); destroyImgPyramid(Ic, preprocessed); blurredImage = preprocessed->blurredImg; /** Scale down the image to build Image Pyramid. 
We find features across all scales of the image **/ blurred_level1 = blurredImage; /** Scale 0 **/ blurred_level2 = preprocessed->resizedImg; /** Scale 1 **/ /* TwoStepKernel* cpu_blur_ret = imageBlur(Ic); TwoStepKernel* cpu_resize_ret = imageResize(cpu_blur_ret->final); blurred_level2 = cpu_resize_ret->final; F2D* blurredImageCPU = cpu_blur_ret->final; F2D* intBlurCPU = cpu_blur_ret->intermediate; F2D* gpuResizeInt = preprocessed->horizEdge; F2D* cpuResizeInt = cpu_resize_ret->intermediate; */ //ImagePyramid* preprocessed = createImgPyramid(Ic); // just need to define a struct to return 4 float* arrays blurredImage = imageBlur(Ic); //blurredImage = preprocessed->blurredImg; /** Scale down the image to build Image Pyramid. We find features across all scales of the image **/ blurred_level1 = blurredImage; /** Scale 0 **/ //blurred_level2 = preprocessed->resizedImg; /** Scale 1 **/ blurred_level2 = imageResize(blurredImage); //F2D* cpu_level2 = imageResize(blurredImage); /*for(int i = 0 ;i < 100;i++) { printf("Element # %d of GPU resize: %0.4f\n",i,blurred_level2->data[i]); printf("Element # %d of CPU resize: %0.4f\n",i,cpu_level2->data[i]); }*/ /** Edge Images - From pre-processed images, build gradient images, both horizontal and vertical **/ /* verticalEdgeImage = calcSobel_dX(blurredImage); TwoStepKernel* dyRet = calcSobel_dY(blurredImage); F2D* horizEdge_CPU = dyRet->final; F2D* horizEdge_int = dyRet->intermediate; F2D* verticalEdgeImage_GPU = preprocessed->vertEdge; F2D* horizontalEdgeImage_GPU = preprocessed->horizEdge; F2D* horizEdge_GPUint = preprocessed->tmp; */ horizontalEdgeImage = preprocessed->horizEdge; verticalEdgeImage = preprocessed->vertEdge; /* for(int i = 0 ;i < verticalEdgeImage->height * verticalEdgeImage->width ;i++) { //printf("Element # %d of GPU int: %0.8f\n",i,preprocessed->vertEdge->data[i]); //printf("Element # %d of CPU int: %0.8f\n", i,intBlurCPU->data[i]); //printf("Element # %d of GPU blur: %0.8f\n",i,blurred_level1->data[i]); //printf("Element # %d of CPU blur: %0.8f\n",i,blurredImageCPU->data[i]); //printf("Element # %d of GPU int: %0.8f\n",i,horizEdge_GPUint->data[i]); //printf("Element # %d of CPU int: %0.8f\n",i,horizEdge_int->data[i]); printf("Element # %d of GPU horiz: %0.8f\n",i,horizontalEdgeImage->data[i]); printf("Element # %d of CPU horiz: %0.8f\n",i,horizEdge_CPU->data[i]); } */ /** Edge images are used for feature detection. So, using the verticalEdgeImage and horizontalEdgeImage images, we compute feature strength across all pixels. 
Lambda matrix is the feature strength matrix returned by calcGoodFeature **/ lambda = calcGoodFeature(verticalEdgeImage, horizontalEdgeImage, verticalEdgeImage->width, verticalEdgeImage->height, WINSZ); endR = lambda->height; endC = lambda->width; lambdaTemp = fReshape(lambda, endR*endC, 1); /** We sort the lambda matrix based on the strengths **/ /** Fill features matrix with top N_FEA features **/ fFreeHandle(lambdaTemp); lambdaTemp = fillFeatures(lambda, N_FEA, WINSZ); features = fTranspose(lambdaTemp); /** Suppress features that have approximately similar strength and belong to close neighborhood **/ interestPnt = getANMS(features, SUPPRESION_RADIUS); /** Refill interestPnt in features matrix **/ fFreeHandle(features); features = fSetArray(2, interestPnt->height, 0); for(i=0; i<2; i++) { for(j=0; j<interestPnt->height; j++) { subsref(features,i,j) = subsref(interestPnt,j,i); } } fFreeHandle(verticalEdgeImage); fFreeHandle(horizontalEdgeImage); fFreeHandle(interestPnt); fFreeHandle(lambda); fFreeHandle(lambdaTemp); iFreeHandle(Ic); /** Until now, we processed base frame. The following for loop processes other frames **/ for(count=1; count<=counter; count++) { /** Read image **/ Ic = Ics[count-1]; rows = Ic->height; cols = Ic->width; cudaStreamSynchronize(frameStreams[count-1]); cudaStreamDestroy(frameStreams[count-1]); destroyImgPyramid(Ics[count-1], newFramePyramids[count-1]); //printf("Read image %d of dim %dx%d.\n",count,rows,cols); /* Start timing */ //start = photonStartTiming(); /** Blur image to remove noise **/ blurredImage = newFramePyramids[count-1]->blurredImg; /** MARK Added: Create the new blurred and resized image**/ //ImagePyramid* newFramePyramid = createImgPyramid(Ic); // just need to define a struct to return 4 float* arrays /** Blur image to remove noise **/ previousFrameBlurred_level1 = fDeepCopy(blurred_level1); previousFrameBlurred_level2 = fDeepCopy(blurred_level2); fFreeHandle(blurred_level1); fFreeHandle(blurred_level2); /** Image pyramid **/ blurred_level1 = blurredImage; blurred_level2 = newFramePyramids[count-1]->resizedImg; /** Gradient image computation, for all scales **/ /* verticalEdge_level1 = calcSobel_dX(blurred_level1); TwoStepKernel* r1 = calcSobel_dY(blurred_level1); horizontalEdge_level1 = r1->final; verticalEdge_level2 = calcSobel_dX(blurred_level2); r1 = calcSobel_dY(blurred_level2); horizontalEdge_level2 = r1->final; */ verticalEdge_level1 = newFramePyramids[count-1]->vertEdge; verticalEdge_level2 = newFramePyramids[count-1]->vertEdge_small; horizontalEdge_level1 = newFramePyramids[count-1]->horizEdge; horizontalEdge_level2 = newFramePyramids[count-1]->horizEdge_small; newpoints = fSetArray(2, features->width, 0); /** Based on features computed in the previous frame, find correspondence in the current frame. 
"status" returns the index of corresponding features **/ status = calcPyrLKTrack(previousFrameBlurred_level1, previousFrameBlurred_level2, verticalEdge_level1, verticalEdge_level2, horizontalEdge_level1, horizontalEdge_level2, blurred_level1, blurred_level2, features, features->width, WINSZ, accuracy, LK_ITER, newpoints); fFreeHandle(verticalEdge_level1); fFreeHandle(verticalEdge_level2); fFreeHandle(horizontalEdge_level1); fFreeHandle(horizontalEdge_level2); fFreeHandle(previousFrameBlurred_level1); fFreeHandle(previousFrameBlurred_level2); /** Populate newpoints with features that had correspondence with previous frame features **/ np_temp = fDeepCopy(newpoints); if(status->width > 0 ) { k = 0; numFind=0; for(i=0; i<status->width; i++) { if( asubsref(status,i) == 1) numFind++; } fFreeHandle(newpoints); newpoints = fSetArray(2, numFind, 0); for(i=0; i<status->width; i++) { if( asubsref(status,i) == 1) { subsref(newpoints,0,k) = subsref(np_temp,0,i); subsref(newpoints,1,k++) = subsref(np_temp,1,i); } } } iFreeHandle(status); iFreeHandle(Ic); fFreeHandle(np_temp); fFreeHandle(features); /** Populate newpoints into features **/ features = fDeepCopy(newpoints); //printf("Printing features...\n"); /*for(i = 0;i<features->height;i++) { for(j=0;j<features->width;j++) { printf("%f\t",subsref(features,i,j)); } printf("\n"); }*/ fFreeHandle(newpoints); } /* Timing utils */ end = photonEndTiming(); elapsed = photonReportTiming(start, end); free(start); free(end); //end roi LVA_BX_INSTRUCTION; #ifdef CHECK /* Self checking */ { int ret=0; float tol = 2.0; #ifdef GENERATE_OUTPUT fWriteMatrix(features, argv[1]); #endif ret = fSelfCheck(features, argv[1], tol); if (ret == -1) printf("Error in Tracking Map\n"); } #endif photonPrintTiming(elapsed); fFreeHandle(blurred_level1); fFreeHandle(blurred_level2); fFreeHandle(features); free(elapsed); return 0; }
1dc1e05de7001a19958e9b2de3e82da4d75f3a5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size, bool read_dst) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { T total(0); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[id]; } } if (read_dst) { out[id] += total; } else { out[id] = total; } id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumAlign4CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; for (int i = id; i < N / 4; i += blockDim.x * gridDim.x) { const float4 *in0_4 = reinterpret_cast<float4 *>(in_0); const float4 *in1_4 = reinterpret_cast<float4 *>(in_1); float4 tmp; tmp.x = in0_4[i].x + in1_4[i].x; tmp.y = in0_4[i].y + in1_4[i].y; tmp.z = in0_4[i].z + in1_4[i].z; tmp.w = in0_4[i].w + in1_4[i].w; reinterpret_cast<float4 *>(out)[i] = tmp; } } template <class T> void SumToLoDTensor(const framework::ExecutionContext &context) { auto in_vars = context.MultiInputVar("X"); const size_t in_num = in_vars.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out = context.Output<LoDTensor>("Out"); bool in_place = in_vars[0] == context.OutputVar("Out"); if (!in_place) { auto *out_ptr = out->mutable_data<T>(context.GetPlace()); if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) { auto &in_0_tensor = 
in_vars[0]->Get<framework::LoDTensor>(); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() == out_ptr); } } } // Sum of two tensors if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() && in_vars[1]->IsType<framework::LoDTensor>()) { auto &in_0 = in_vars[0]->Get<framework::LoDTensor>(); auto &in_1 = in_vars[1]->Get<framework::LoDTensor>(); auto length = in_0.numel(); if (length && in_0.IsInitialized() && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0); auto in_1_e = EigenVector<T>::Flatten(in_1); result.device(place) = in_0_e + in_1_e; } else if (length && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor( context.template device_context<platform::CUDADeviceContext>(), out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (in_vars[i]->IsType<framework::LoDTensor>()) { auto &in_i = in_vars[i]->Get<framework::LoDTensor>(); in_data.emplace_back(in_i.data<T>()); lod_length = in_i.numel(); } else if (in_vars[i]->IsType<framework::SelectedRows>()) { selectrow_index.push_back(i); } } // compute select rows seperately. if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = in_vars[index]->Get<framework::SelectedRows>(); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height()); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( sr_in_out_data.size() * sizeof(T *)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_sr_in_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); hipLaunchKernelGGL(( SumSelectedRowsCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
if (!in_data.empty()) { auto tmp_in_array = platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( in_data.size() * sizeof(T *)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); hipLaunchKernelGGL(( SumArrayCUDAKernel<T>), dim3(grids), dim3(blocks), 0, stream, in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } template <typename T> class SumKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto out_var = context.OutputVar("Out"); if (out_var->IsType<framework::LoDTensor>()) { SumToLoDTensor<T>(context); } else if (out_var->IsType<framework::SelectedRows>()) { SelectedRowsCompute<platform::CUDADeviceContext, T>(context); } else if (out_var->IsType<framework::LoDTensorArray>()) { LodTensorArrayCompute<platform::CUDADeviceContext, T>(context); } else { PADDLE_THROW("Unexpected branch, output variable type is %s", framework::ToTypeName(out_var->Type())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>, ops::SumKernel<paddle::platform::CUDADeviceContext, double>, ops::SumKernel<paddle::platform::CUDADeviceContext, int>, ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>);
1dc1e05de7001a19958e9b2de3e82da4d75f3a5e.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/platform/device_context.h> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/sum_op.h" #include "paddle/fluid/platform/float16.h" namespace plat = paddle::platform; namespace paddle { namespace operators { #define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) using LoDTensor = framework::LoDTensor; template <class T> __global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { out[id] = in_0[id] + in_1[id]; id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size, bool read_dst) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { T total(0); for (int i = 0; i < in_size; ++i) { const T *tmp = in[i]; if (tmp) { total += tmp[id]; } } if (read_dst) { out[id] += total; } else { out[id] = total; } id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, size_t rows) { int id = blockIdx.x * blockDim.x + threadIdx.x; while (id < N) { for (int i = 0; i < 2 * rows; i += 2) { const T *tmp = sr_in_out[i]; T *tmp_out = sr_in_out[i + 1]; if (tmp && tmp_out) { tmp_out[id] += tmp[id]; } } id += blockDim.x * gridDim.x; } } template <class T> __global__ void SumAlign4CUDAKernel(const T *in_0, const T *in_1, T *out, int64_t N) { int id = blockIdx.x * blockDim.x + threadIdx.x; for (int i = id; i < N / 4; i += blockDim.x * gridDim.x) { const float4 *in0_4 = reinterpret_cast<float4 *>(in_0); const float4 *in1_4 = reinterpret_cast<float4 *>(in_1); float4 tmp; tmp.x = in0_4[i].x + in1_4[i].x; tmp.y = in0_4[i].y + in1_4[i].y; tmp.z = in0_4[i].z + in1_4[i].z; tmp.w = in0_4[i].w + in1_4[i].w; reinterpret_cast<float4 *>(out)[i] = tmp; } } template <class T> void SumToLoDTensor(const framework::ExecutionContext &context) { auto in_vars = context.MultiInputVar("X"); const size_t in_num = in_vars.size(); constexpr size_t theory_sm_threads = 1024; auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); auto sm_count = max_threads / theory_sm_threads; size_t tile_size = 0; dim3 grids; dim3 blocks; auto ComputeKernelParameter = [&](size_t length) { if (length >= max_threads) tile_size = 1024; else if (length < max_threads && length > sm_count * 128) tile_size = 512; else if (length <= sm_count * 128) tile_size = 256; grids = dim3(CEIL_DIV(length, tile_size), 1, 1); blocks = dim3(tile_size, 1, 1); }; auto *out = context.Output<LoDTensor>("Out"); bool in_place = in_vars[0] == context.OutputVar("Out"); if (!in_place) { auto *out_ptr = out->mutable_data<T>(context.GetPlace()); if (in_num >= 1 && in_vars[0]->IsType<framework::LoDTensor>()) { auto &in_0_tensor = in_vars[0]->Get<framework::LoDTensor>(); if (in_0_tensor.numel() > 0) { in_place = (in_0_tensor.data<T>() 
== out_ptr); } } } // Sum of two tensors if (in_num == 2 && in_vars[0]->IsType<framework::LoDTensor>() && in_vars[1]->IsType<framework::LoDTensor>()) { auto &in_0 = in_vars[0]->Get<framework::LoDTensor>(); auto &in_1 = in_vars[1]->Get<framework::LoDTensor>(); auto length = in_0.numel(); if (length && in_0.IsInitialized() && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); auto in_0_e = EigenVector<T>::Flatten(in_0); auto in_1_e = EigenVector<T>::Flatten(in_1); result.device(place) = in_0_e + in_1_e; } else if (length && in_0.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_0); } else if (length && in_1.IsInitialized()) { auto result = EigenVector<T>::Flatten(*out); auto &place = *dev_ctx.eigen_device(); result.device(place) = EigenVector<T>::Flatten(in_1); } return; } int start = in_place ? 1 : 0; if (!in_place) { math::SetConstant<platform::CUDADeviceContext, T> constant_functor; constant_functor( context.template device_context<platform::CUDADeviceContext>(), out, static_cast<T>(0)); } std::vector<const T *> in_data; std::vector<int> selectrow_index; int64_t lod_length = 0; bool dst_write = false; for (int i = start; i < in_num; ++i) { if (in_vars[i]->IsType<framework::LoDTensor>()) { auto &in_i = in_vars[i]->Get<framework::LoDTensor>(); in_data.emplace_back(in_i.data<T>()); lod_length = in_i.numel(); } else if (in_vars[i]->IsType<framework::SelectedRows>()) { selectrow_index.push_back(i); } } // compute select rows seperately. if (!selectrow_index.empty()) { std::vector<const T *> sr_in_out_data; size_t rows = 0; int64_t length = 0; for (auto index : selectrow_index) { auto &sr = in_vars[index]->Get<framework::SelectedRows>(); auto &sr_value = sr.value(); auto &sr_rows = sr.rows(); auto row_numel = sr_value.numel() / sr_rows.size(); auto out_dims = out->dims(); PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]); PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height()); auto *sr_data = sr_value.data<T>(); auto *sr_out_data = out->data<T>(); rows += sr_rows.size(); length = row_numel; for (size_t i = 0; i < sr_rows.size(); ++i) { sr_in_out_data.emplace_back(&sr_data[i * row_numel]); sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); } } if (!sr_in_out_data.empty()) { auto tmp_sr_in_out_array = platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( sr_in_out_data.size() * sizeof(T *)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_sr_in_out_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(sr_in_out_data.data()), sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); T **sr_in_out_array_data = reinterpret_cast<T **>(tmp_sr_in_out_array->ptr()); ComputeKernelParameter(length); SumSelectedRowsCUDAKernel<T><<<grids, blocks, 0, stream>>>( sr_in_out_array_data, length, rows); dst_write = true; } } // if indata not null, merge into one kernel call. 
if (!in_data.empty()) { auto tmp_in_array = platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( in_data.size() * sizeof(T *)); memory::Copy(boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()), tmp_in_array->ptr(), platform::CPUPlace(), reinterpret_cast<void *>(in_data.data()), in_data.size() * sizeof(T *), dev_ctx.stream()); T **in_array_data = reinterpret_cast<T **>(tmp_in_array->ptr()); ComputeKernelParameter(lod_length); SumArrayCUDAKernel<T><<<grids, blocks, 0, stream>>>( in_array_data, out->data<T>(), lod_length, in_data.size(), dst_write | in_place); } } template <typename T> class SumKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto out_var = context.OutputVar("Out"); if (out_var->IsType<framework::LoDTensor>()) { SumToLoDTensor<T>(context); } else if (out_var->IsType<framework::SelectedRows>()) { SelectedRowsCompute<platform::CUDADeviceContext, T>(context); } else if (out_var->IsType<framework::LoDTensorArray>()) { LodTensorArrayCompute<platform::CUDADeviceContext, T>(context); } else { PADDLE_THROW("Unexpected branch, output variable type is %s", framework::ToTypeName(out_var->Type())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( sum, ops::SumKernel<paddle::platform::CUDADeviceContext, float>, ops::SumKernel<paddle::platform::CUDADeviceContext, double>, ops::SumKernel<paddle::platform::CUDADeviceContext, int>, ops::SumKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::SumKernel<paddle::platform::CUDADeviceContext, plat::float16>);
c06422f708e2568ba5c314c338f6a419a00d26ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2021 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/core/time/timer.h" #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/memory.h" #include "saiga/cuda/shfl_helper.h" #include "saiga/cuda/shuffle_copy.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" namespace Saiga { namespace CUDA { template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyUnCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto localStart = ti.thread_id * ElementSize; for (int i = 0; i < ElementSize; ++i) { l[i] = data[localStart + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { result[localStart + i] = l[i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyFullCoalesced(ArrayView<T> data, ArrayView<T> result) { const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; CUDA::ThreadInfo<BLOCK_SIZE> ti; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto warpStart = wId * elementsPerWarp; for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto globalOffset = warpStart + e; if (globalOffset < N) { auto d = data[globalOffset]; d += 42; result[globalOffset] = d; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryUnCoalesced(ArrayView<T> data, ArrayView<T> result) { __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto matrixId = ti.thread_id; auto globalOffset = matrixId * ElementSize; auto localMatrixId = ti.local_thread_id; // id in shared buffer // linear copy for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = data[globalOffset + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // linear copy for (int i = 0; i < ElementSize; ++i) { result[globalOffset + i] = buffer[localMatrixId][i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // __shared__ double buffer[elementsPerBlock]; __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { // for(auto id = ti.thread_id * ElementSize; id < N; id += ti.num_warps){ // for(auto id = 
ti.thread_id; id < Nelements; id += ti.grid_size ){ T l[ElementSize]; auto localMatrixId = ti.local_thread_id; // id in shared buffer auto warpStart = ti.warp_id * elementsPerWarp; // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { buffer[localMatrix][localOffset] = data[globalOffset]; } } for (int i = 0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { result[globalOffset] = buffer[localMatrix][localOffset]; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int2> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced2(ArrayView<T> data, ArrayView<T> result) { const int elementSize = sizeof(T) * ElementSize; const int fullVectorsPerElement = elementSize / sizeof(VectorType); #ifdef SAIGA_HAS_CONSTEXPR const int vectorsPerElement = CUDA::getBlockCount(elementSize, sizeof(VectorType)); static_assert(vectorsPerElement * sizeof(VectorType) == elementSize, "T cannot be loaded with VectorType"); #else const int vectorsPerElement = 1; #endif // const int vectorsPerWarp = fullVectorsPerElement * SAIGA_WARP_SIZE; const int tileSizeBytes = 64; const int tileSizeVectors = tileSizeBytes / sizeof(VectorType); const int fullVectorsPerTile = fullVectorsPerElement > tileSizeVectors ? tileSizeVectors : fullVectorsPerElement; const int vectorsPerTile = vectorsPerElement > tileSizeVectors ? tileSizeVectors : vectorsPerElement; // const int vectorsPerTile = N > 8 ? 8 : N; const int fullTiles = fullVectorsPerElement == 0 ? fullVectorsPerElement : fullVectorsPerElement / fullVectorsPerTile; // const int fullTiles = fullVectorsPerElement == 0 ? 
fullVectorsPerElement : fullVectorsPerElement / // fullVectorsPerTile; const int tiles = 2; const int elementsPerTile = N / tiles; const int fullVectorsPerBlock = fullVectorsPerElement * BLOCK_SIZE; // __shared__ double buffer[elementsPerBlock]; __shared__ VectorType buffer[BLOCK_SIZE][vectorsPerTile]; // __shared__ T buffer[BLOCK_SIZE][N]; T l[ElementSize]; auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto localMatrixId = ti.local_thread_id; // id in shared buffer // auto warpStart = ti.warp_id * vectorsPerWarp; auto blockStart = ti.block_id * fullVectorsPerBlock; auto warpOffset = ti.warp_lane * SAIGA_WARP_SIZE; // start matrix of this warp in block local shared memory #if 1 for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + tileOffset + localOffset; if (globalIndex < NVectors) { buffer[localMatrix][localOffset] = global[globalIndex]; // printf("read %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } for (int i = 0; i < fullVectorsPerTile; ++i) { local[i + tileOffset] = buffer[localMatrixId][i]; } } #else // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; auto localOffset = e % N; buffer[localMatrix][localOffset] = data[warpStart + e]; } for (int i = 0; i < N; ++i) { l[i] = buffer[localMatrixId][i]; } #endif // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } // for(int t = 0 ; t < tiles ; ++t){ // auto tileOffset = t * elementsPerTile; // for(int i = 0; i < elementsPerTile; ++i){ // buffer[localMatrixId][i] = l[i + tileOffset]; // } // //strided copy // for(auto e = ti.lane_id; e < elementsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / elementsPerTile; // auto localOffset = e % elementsPerTile; // result[tileOffset+warpStart+e] = buffer[localMatrix][localOffset]; // } // } // for(int i = 0; i < N; ++i){ // buffer[localMatrixId][i] = l[i]; // } //// strided copy // for(auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; // auto localOffset = e % N; // result[warpStart+e] = buffer[localMatrix][localOffset]; // } for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; for (int i = 0; i < fullVectorsPerTile; ++i) { buffer[localMatrixId][i] = local[i + tileOffset]; } // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + 
tileOffset + localOffset; if (globalIndex < NVectors) { globalResult[globalIndex] = buffer[localMatrix][localOffset]; // printf("write %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int4, int localWarpSize2 = -1> __launch_bounds__(BLOCK_SIZE) __global__ static void shuffleCopy(ArrayView<T> data, ArrayView<T> result) { const int localWarpSize = localWarpSize2 == -1 ? int(SAIGA_L2_CACHE_LINE_SIZE / sizeof(VectorType)) : localWarpSize2; const int vectorsPerElement = CUDA::getBlockCount(ElementSize * sizeof(T), sizeof(VectorType)); auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, localWarpSize); // const int localWarpSize = 2; CUDA::ThreadInfo<BLOCK_SIZE, localWarpSize> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { T l[ElementSize]; // auto matrixId = ti.thread_id; // auto globalOffset = matrixId * ElementSize; // auto localMatrixId = ti.local_thread_id; //id in shared buffer auto globalStart = wId * localWarpSize * vectorsPerElement; // printf("warp %d %d %d %d \n", wId,ti.lane_id,localWarpSize,Nelements); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); // loadShuffle<localWarpSize,sizeof(T)*ElementSize,VectorType>(data.data()+globalStart,local,ti.lane_id); loadShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(global, local, ti.lane_id, globalStart, NVectors); for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } storeShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(globalResult, local, ti.lane_id, globalStart, NVectors); } } /* __global__ static void strangeLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane == i) tmp = local[i]; } out[id] = tmp; } __global__ static void strangeUnrolled(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; //manually unrolled loop if(lane == 0) tmp = local[0]; if(lane == 1) tmp = local[1]; out[id] = tmp; } __global__ static void evenStrangerLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane >= i) tmp = local[i]; } out[id] = tmp; } */ // nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -lineinfo -src-in-ptx // -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr inverse_test.cu nvcc $CPPFLAGS -I // ~/Master/libs/data/include/eigen3/ -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 // --expt-relaxed-constexpr inverse_test.cu template <typename ElementType, int ElementSize> void coalescedCopyTest2(int ElementCount) { 
std::cout << "Bytes per element = " << sizeof(ElementType) * ElementSize << std::endl; size_t readWrites = ElementSize * ElementCount * sizeof(ElementType) * 2; CUDA::PerformanceTestHelper test("Coalesced processing test. ElementSize: " + std::to_string(ElementSize) + " ElementCount: " + std::to_string(ElementCount), readWrites); thrust::host_vector<ElementType> data(ElementSize * ElementCount, 42); thrust::host_vector<ElementType> result(ElementSize * ElementCount + 1, -1); thrust::host_vector<ElementType> ref(ElementSize * ElementCount + 1, -1); for (int i = 0; i < int(data.size()); ++i) { data[i] = rand() % 10; ref[i] = data[i] + 42; } thrust::device_vector<ElementType> d_data(data); thrust::device_vector<ElementType> d_result(result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( copyUnCoalesced<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); } test.addMeassurement("copyUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( sharedMemoryUnCoalesced<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); } test.addMeassurement("sharedMemoryUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( sharedMemoryCoalesced<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); } test.addMeassurement("sharedMemoryCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( sharedMemoryCoalesced2<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); // sharedMemoryCoalesced2<ElementType,ElementSize,BLOCK_SIZE> <<< // CUDA::getBlockCount(ElementCount,BLOCK_SIZE),BLOCK_SIZE >>>(d_data,d_result); } test.addMeassurement("sharedMemoryCoalesced2", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( copyFullCoalesced<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); } test.addMeassurement("copyFullCoalesced (no vector)", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); hipLaunchKernelGGL(( shuffleCopy<ElementType, ElementSize, BLOCK_SIZE>) , dim3(CUDA::getBlockCount(ElementCount, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); } test.addMeassurement("shuffleCopy", time); CUDA_SYNC_CHECK_ERROR(); } // for(int i = 0 ; i < ref.size() ; ++i){ // std::cout << ref[i] << " == " << d_result[i] << std::endl; // } SAIGA_ASSERT(ref == d_result); { result = result; float time; { CUDA::ScopedTimer t(time); hipMemcpy(thrust::raw_pointer_cast(d_result.data()), thrust::raw_pointer_cast(d_data.data()), d_data.size() * sizeof(ElementType), hipMemcpyDeviceToDevice); } test.addMeassurement("hipMemcpy", time); 
CUDA_SYNC_CHECK_ERROR(); } return; } void coalescedCopyTest() { CUDA_SYNC_CHECK_ERROR(); // coalescedCopyTest2<int,4>(1); // coalescedCopyTest2<int,2>(1); // coalescedCopyTest2<int,16>(1); // coalescedCopyTest2<int,16>(3); // coalescedCopyTest2<int,16>(5); // coalescedCopyTest2<int,16>(1000 * 1000 + 1); // coalescedCopyTest2<int,16>(32); coalescedCopyTest2<int, 32>(1000 * 1000 + 1); coalescedCopyTest2<int, 64>(1000 * 1000 + 1); CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
c06422f708e2568ba5c314c338f6a419a00d26ec.cu
/** * Copyright (c) 2021 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/core/time/timer.h" #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/memory.h" #include "saiga/cuda/shfl_helper.h" #include "saiga/cuda/shuffle_copy.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" namespace Saiga { namespace CUDA { template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyUnCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto localStart = ti.thread_id * ElementSize; for (int i = 0; i < ElementSize; ++i) { l[i] = data[localStart + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { result[localStart + i] = l[i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyFullCoalesced(ArrayView<T> data, ArrayView<T> result) { const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; CUDA::ThreadInfo<BLOCK_SIZE> ti; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto warpStart = wId * elementsPerWarp; for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto globalOffset = warpStart + e; if (globalOffset < N) { auto d = data[globalOffset]; d += 42; result[globalOffset] = d; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryUnCoalesced(ArrayView<T> data, ArrayView<T> result) { __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto matrixId = ti.thread_id; auto globalOffset = matrixId * ElementSize; auto localMatrixId = ti.local_thread_id; // id in shared buffer // linear copy for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = data[globalOffset + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // linear copy for (int i = 0; i < ElementSize; ++i) { result[globalOffset + i] = buffer[localMatrixId][i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // __shared__ double buffer[elementsPerBlock]; __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { // for(auto id = ti.thread_id * ElementSize; id < N; id += ti.num_warps){ // for(auto id = ti.thread_id; id < Nelements; id += ti.grid_size ){ T l[ElementSize]; auto localMatrixId = 
ti.local_thread_id; // id in shared buffer auto warpStart = ti.warp_id * elementsPerWarp; // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { buffer[localMatrix][localOffset] = data[globalOffset]; } } for (int i = 0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { result[globalOffset] = buffer[localMatrix][localOffset]; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int2> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced2(ArrayView<T> data, ArrayView<T> result) { const int elementSize = sizeof(T) * ElementSize; const int fullVectorsPerElement = elementSize / sizeof(VectorType); #ifdef SAIGA_HAS_CONSTEXPR const int vectorsPerElement = CUDA::getBlockCount(elementSize, sizeof(VectorType)); static_assert(vectorsPerElement * sizeof(VectorType) == elementSize, "T cannot be loaded with VectorType"); #else const int vectorsPerElement = 1; #endif // const int vectorsPerWarp = fullVectorsPerElement * SAIGA_WARP_SIZE; const int tileSizeBytes = 64; const int tileSizeVectors = tileSizeBytes / sizeof(VectorType); const int fullVectorsPerTile = fullVectorsPerElement > tileSizeVectors ? tileSizeVectors : fullVectorsPerElement; const int vectorsPerTile = vectorsPerElement > tileSizeVectors ? tileSizeVectors : vectorsPerElement; // const int vectorsPerTile = N > 8 ? 8 : N; const int fullTiles = fullVectorsPerElement == 0 ? fullVectorsPerElement : fullVectorsPerElement / fullVectorsPerTile; // const int fullTiles = fullVectorsPerElement == 0 ? 
fullVectorsPerElement : fullVectorsPerElement / // fullVectorsPerTile; const int tiles = 2; const int elementsPerTile = N / tiles; const int fullVectorsPerBlock = fullVectorsPerElement * BLOCK_SIZE; // __shared__ double buffer[elementsPerBlock]; __shared__ VectorType buffer[BLOCK_SIZE][vectorsPerTile]; // __shared__ T buffer[BLOCK_SIZE][N]; T l[ElementSize]; auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto localMatrixId = ti.local_thread_id; // id in shared buffer // auto warpStart = ti.warp_id * vectorsPerWarp; auto blockStart = ti.block_id * fullVectorsPerBlock; auto warpOffset = ti.warp_lane * SAIGA_WARP_SIZE; // start matrix of this warp in block local shared memory #if 1 for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + tileOffset + localOffset; if (globalIndex < NVectors) { buffer[localMatrix][localOffset] = global[globalIndex]; // printf("read %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } for (int i = 0; i < fullVectorsPerTile; ++i) { local[i + tileOffset] = buffer[localMatrixId][i]; } } #else // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; auto localOffset = e % N; buffer[localMatrix][localOffset] = data[warpStart + e]; } for (int i = 0; i < N; ++i) { l[i] = buffer[localMatrixId][i]; } #endif // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } // for(int t = 0 ; t < tiles ; ++t){ // auto tileOffset = t * elementsPerTile; // for(int i = 0; i < elementsPerTile; ++i){ // buffer[localMatrixId][i] = l[i + tileOffset]; // } // //strided copy // for(auto e = ti.lane_id; e < elementsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / elementsPerTile; // auto localOffset = e % elementsPerTile; // result[tileOffset+warpStart+e] = buffer[localMatrix][localOffset]; // } // } // for(int i = 0; i < N; ++i){ // buffer[localMatrixId][i] = l[i]; // } //// strided copy // for(auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; // auto localOffset = e % N; // result[warpStart+e] = buffer[localMatrix][localOffset]; // } for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; for (int i = 0; i < fullVectorsPerTile; ++i) { buffer[localMatrixId][i] = local[i + tileOffset]; } // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + 
tileOffset + localOffset; if (globalIndex < NVectors) { globalResult[globalIndex] = buffer[localMatrix][localOffset]; // printf("write %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int4, int localWarpSize2 = -1> __launch_bounds__(BLOCK_SIZE) __global__ static void shuffleCopy(ArrayView<T> data, ArrayView<T> result) { const int localWarpSize = localWarpSize2 == -1 ? int(SAIGA_L2_CACHE_LINE_SIZE / sizeof(VectorType)) : localWarpSize2; const int vectorsPerElement = CUDA::getBlockCount(ElementSize * sizeof(T), sizeof(VectorType)); auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, localWarpSize); // const int localWarpSize = 2; CUDA::ThreadInfo<BLOCK_SIZE, localWarpSize> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { T l[ElementSize]; // auto matrixId = ti.thread_id; // auto globalOffset = matrixId * ElementSize; // auto localMatrixId = ti.local_thread_id; //id in shared buffer auto globalStart = wId * localWarpSize * vectorsPerElement; // printf("warp %d %d %d %d \n", wId,ti.lane_id,localWarpSize,Nelements); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); // loadShuffle<localWarpSize,sizeof(T)*ElementSize,VectorType>(data.data()+globalStart,local,ti.lane_id); loadShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(global, local, ti.lane_id, globalStart, NVectors); for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } storeShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(globalResult, local, ti.lane_id, globalStart, NVectors); } } /* __global__ static void strangeLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane == i) tmp = local[i]; } out[id] = tmp; } __global__ static void strangeUnrolled(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; //manually unrolled loop if(lane == 0) tmp = local[0]; if(lane == 1) tmp = local[1]; out[id] = tmp; } __global__ static void evenStrangerLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane >= i) tmp = local[i]; } out[id] = tmp; } */ // nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -lineinfo -src-in-ptx // -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr inverse_test.cu nvcc $CPPFLAGS -I // ~/Master/libs/data/include/eigen3/ -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 // --expt-relaxed-constexpr inverse_test.cu template <typename ElementType, int ElementSize> void coalescedCopyTest2(int ElementCount) { 
std::cout << "Bytes per element = " << sizeof(ElementType) * ElementSize << std::endl; size_t readWrites = ElementSize * ElementCount * sizeof(ElementType) * 2; CUDA::PerformanceTestHelper test("Coalesced processing test. ElementSize: " + std::to_string(ElementSize) + " ElementCount: " + std::to_string(ElementCount), readWrites); thrust::host_vector<ElementType> data(ElementSize * ElementCount, 42); thrust::host_vector<ElementType> result(ElementSize * ElementCount + 1, -1); thrust::host_vector<ElementType> ref(ElementSize * ElementCount + 1, -1); for (int i = 0; i < int(data.size()); ++i) { data[i] = rand() % 10; ref[i] = data[i] + 42; } thrust::device_vector<ElementType> d_data(data); thrust::device_vector<ElementType> d_result(result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); copyUnCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("copyUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryUnCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("sharedMemoryUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("sharedMemoryCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryCoalesced2<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); // sharedMemoryCoalesced2<ElementType,ElementSize,BLOCK_SIZE> <<< // CUDA::getBlockCount(ElementCount,BLOCK_SIZE),BLOCK_SIZE >>>(d_data,d_result); } test.addMeassurement("sharedMemoryCoalesced2", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); copyFullCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("copyFullCoalesced (no vector)", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); shuffleCopy<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("shuffleCopy", time); CUDA_SYNC_CHECK_ERROR(); } // for(int i = 0 ; i < ref.size() ; ++i){ // std::cout << ref[i] << " == " << d_result[i] << std::endl; // } SAIGA_ASSERT(ref == d_result); { result = result; float time; { CUDA::ScopedTimer t(time); cudaMemcpy(thrust::raw_pointer_cast(d_result.data()), thrust::raw_pointer_cast(d_data.data()), d_data.size() * sizeof(ElementType), cudaMemcpyDeviceToDevice); } test.addMeassurement("cudaMemcpy", time); CUDA_SYNC_CHECK_ERROR(); } return; } void coalescedCopyTest() { CUDA_SYNC_CHECK_ERROR(); // coalescedCopyTest2<int,4>(1); // coalescedCopyTest2<int,2>(1); // coalescedCopyTest2<int,16>(1); // coalescedCopyTest2<int,16>(3); // 
coalescedCopyTest2<int,16>(5); // coalescedCopyTest2<int,16>(1000 * 1000 + 1); // coalescedCopyTest2<int,16>(32); coalescedCopyTest2<int, 32>(1000 * 1000 + 1); coalescedCopyTest2<int, 64>(1000 * 1000 + 1); CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
c3a06054d549423721cf60f3f97bcbae088f653f.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //poison log likelihood for one observation __device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(g*x>100)?(g*x):KC_MIN(log1p(KC_EXP(x*g)),KC_MAXN)); KC_FP_TYPE r = KC_MAX(KC_MINN,KC_MIN(KC_POW(logex*1.00000,log_power),KC_MAXN))*KC_EXP(sh); return y*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y+1.0); } //sums up log likelihood of each trial given model parameters __global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < 1) { log_p[0] = 0; for(int ii = 0; ii < NT; ii++) { log_p[0] += log_p_tr[ii]; } } } //averages log likelihood of each simulated path // (one thread for each trial) __global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { log_p_tr[idx] = 0; KC_FP_TYPE trSum = 0; KC_FP_TYPE log_x = 0; log_p_tr[idx] = KC_SQRT(-1.0); //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) { trSum = 1 ; log_x = log_p[ii*NT+idx]; for(int kk = 0; kk < ii; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } for(int kk = ii+1; kk < nSims; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); break; } } } } //simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood __global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT ) { int trNum = idx; int T1 = trIdx[trNum]; //xx contains zero mean Gaussian noise of variance \omega^2 xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial int currIdx = sim*(NT)+idx; log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1],log_power); for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { //progates particle forward in time xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0); //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii],log_power); } } } //Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters // This estimation is made by Monte Carlo simulations from the model to integrate out latent variable //args // 0 = y (observations) // 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. 
Includes final value that should be length of y) // 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 3 = spike history effect (same size as y) // 4 = beta values // 5 = w (variance of diffusion process) // 6 = l_0 (starting lambda value) // 7 = g (absorbing boundary effective height) // 8 = dt (bin size in seconds) // 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) // 10 = log_power //outputs (left-hand side) // 0 = log p(y|\theta) // 1 = log p(y|\theta) for each individual trial void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; //load up trial data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); int * trIdx = kcGetArrayDataInt(prhs[1]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * betaIdx = kcGetArrayDataInt(prhs[2],TT); // load spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[3]); //how many simulations to use to estimate log p(y|\theta) int trialsToSim = (int)mxGetScalar(prhs[9]); //load up parameters to simulate model if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); } KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]); int numBetas = mxGetNumberOfElements(prhs[4]); KC_FP_TYPE * b_gpu; ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != hipSuccess) { mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } checkCudaErrors(hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice)); KC_FP_TYPE w = mxGetScalar(prhs[5]); KC_FP_TYPE l_0 = mxGetScalar(prhs[6]); KC_FP_TYPE g = mxGetScalar(prhs[7]); KC_FP_TYPE dt = mxGetScalar(prhs[8]); KC_FP_TYPE log_power = mxGetScalar(prhs[10]); //setup CUDA variables + random number generator int randSize = TT + (((TT)%2==0)?0:1); KC_FP_TYPE * xx; checkCudaErrors(hipMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } int blockSize = 2; int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1); int blockSizeT = 2; int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); //allocates sspace on GPU for simulating the likelihood KC_FP_TYPE * log_p; //KC_FP_TYPE * log_p_2; KC_FP_TYPE * log_p_tr; KC_FP_TYPE * sum_log_p; checkCudaErrors(hipMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); //checkCudaErrors(hipMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim)); checkCudaErrors(hipMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); checkCudaErrors(hipMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); // generate AR1 noise for(int kk = 0; kk < trialsToSim; kk++) { //generates zero mean Gaussian 
noise with correct variance hiprandStatus_t = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND gen error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } //checkCudaErrors(hipDeviceSynchronize()); //calculate path + logP hipLaunchKernelGGL(( kcSimGBPaths), dim3(nBlocks),dim3(blockSize), 0, 0, y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe,log_power); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error in simulating of kcSimGaussianBound.cu "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } } // log_p_2 = log_p; //average likelihood of each sampled path to get log p(y|\theta) for each trial hipLaunchKernelGGL(( kcSumGBlogpTr), dim3(nBlocksT),dim3(blockSizeT), 0, 0, log_p,log_p_tr,NT,trialsToSim); checkCudaErrors(hipDeviceSynchronize()); //sums up log likelihood of each trial hipLaunchKernelGGL(( kcSumGBfinal), dim3(1),dim3(1), 0, 0, log_p_tr,sum_log_p,NT); checkCudaErrors(hipDeviceSynchronize()); //copy back to host if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } //free up CUDA variables checkCudaErrors(hiprandDestroyGenerator(curandGen)); checkCudaErrors(hipFree(xx)); checkCudaErrors(hipFree(b_gpu)); checkCudaErrors(hipFree(log_p)); checkCudaErrors(hipFree(log_p_tr)); checkCudaErrors(hipFree(sum_log_p)); }
c3a06054d549423721cf60f3f97bcbae088f653f.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //poison log likelihood for one observation __device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE log_power) { KC_FP_TYPE logex = KC_MAX(KC_MINN,(g*x>100)?(g*x):KC_MIN(log1p(KC_EXP(x*g)),KC_MAXN)); KC_FP_TYPE r = KC_MAX(KC_MINN,KC_MIN(KC_POW(logex*1.00000,log_power),KC_MAXN))*KC_EXP(sh); return y*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y+1.0); } //sums up log likelihood of each trial given model parameters __global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < 1) { log_p[0] = 0; for(int ii = 0; ii < NT; ii++) { log_p[0] += log_p_tr[ii]; } } } //averages log likelihood of each simulated path // (one thread for each trial) __global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { log_p_tr[idx] = 0; KC_FP_TYPE trSum = 0; KC_FP_TYPE log_x = 0; log_p_tr[idx] = KC_SQRT(-1.0); //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) { trSum = 1 ; log_x = log_p[ii*NT+idx]; for(int kk = 0; kk < ii; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } for(int kk = ii+1; kk < nSims; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); break; } } } } //simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood __global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT ) { int trNum = idx; int T1 = trIdx[trNum]; //xx contains zero mean Gaussian noise of variance \omega^2 xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial int currIdx = sim*(NT)+idx; log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1],log_power); for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { //progates particle forward in time xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0); //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii],log_power); } } } //Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters // This estimation is made by Monte Carlo simulations from the model to integrate out latent variable //args // 0 = y (observations) // 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 3 = spike history effect (same size as y) // 4 = beta values // 5 = w (variance of diffusion process) // 6 = l_0 (starting lambda value) // 7 = g (absorbing boundary effective height) // 8 = dt (bin size in seconds) // 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) // 10 = log_power //outputs (left-hand side) // 0 = log p(y|\theta) // 1 = log p(y|\theta) for each individual trial void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; //load up trial data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); int * trIdx = kcGetArrayDataInt(prhs[1]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * betaIdx = kcGetArrayDataInt(prhs[2],TT); // load spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[3]); //how many simulations to use to estimate log p(y|\theta) int trialsToSim = (int)mxGetScalar(prhs[9]); //load up parameters to simulate model if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); } KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]); int numBetas = mxGetNumberOfElements(prhs[4]); KC_FP_TYPE * b_gpu; ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != cudaSuccess) { mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice)); KC_FP_TYPE w = mxGetScalar(prhs[5]); KC_FP_TYPE l_0 = mxGetScalar(prhs[6]); KC_FP_TYPE g = mxGetScalar(prhs[7]); KC_FP_TYPE dt = mxGetScalar(prhs[8]); KC_FP_TYPE log_power = mxGetScalar(prhs[10]); //setup CUDA variables + random number generator int randSize = TT + (((TT)%2==0)?0:1); KC_FP_TYPE * xx; checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } int blockSize = 2; int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1); int blockSizeT = 2; int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); //allocates sspace on GPU for simulating the likelihood KC_FP_TYPE * log_p; //KC_FP_TYPE * log_p_2; KC_FP_TYPE * log_p_tr; KC_FP_TYPE * sum_log_p; checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); //checkCudaErrors(cudaMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim)); checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); // generate AR1 noise for(int kk = 0; kk < trialsToSim; kk++) { //generates zero mean Gaussian noise with correct variance curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND gen error 
%d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors"); } //checkCudaErrors(cudaDeviceSynchronize()); //calculate path + logP kcSimGBPaths<<<nBlocks,blockSize>>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe,log_power); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error in simulating of kcSimGaussianBound.cu "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } } // log_p_2 = log_p; //average likelihood of each sampled path to get log p(y|\theta) for each trial kcSumGBlogpTr<<<nBlocksT,blockSizeT>>>(log_p,log_p_tr,NT,trialsToSim); checkCudaErrors(cudaDeviceSynchronize()); //sums up log likelihood of each trial kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT); checkCudaErrors(cudaDeviceSynchronize()); //copy back to host if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); } //free up CUDA variables checkCudaErrors(curandDestroyGenerator(curandGen)); checkCudaErrors(cudaFree(xx)); checkCudaErrors(cudaFree(b_gpu)); checkCudaErrors(cudaFree(log_p)); checkCudaErrors(cudaFree(log_p_tr)); checkCudaErrors(cudaFree(sum_log_p)); }
ce14dc8c7c3022578e1247e58b4c0f84b2e79b1e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void childKernel()
{
    printf("Hello ");
}

__global__ void parentKernel()
{
    // launch child
    hipLaunchKernelGGL((childKernel), dim3(1), dim3(1), 0, 0);
    if (hipSuccess != hipGetLastError()) {
        return;
    }

    // wait for child to complete
    if (hipSuccess != hipDeviceSynchronize()) {
        return;
    }

    printf("World!\n");
}

int main(int argc, char *argv[])
{
    // launch parent
    hipLaunchKernelGGL((parentKernel), dim3(1), dim3(1), 0, 0);
    if (hipSuccess != hipGetLastError()) {
        return 1;
    }

    // wait for parent to complete
    if (hipSuccess != hipDeviceSynchronize()) {
        return 2;
    }

    return 0;
}
ce14dc8c7c3022578e1247e58b4c0f84b2e79b1e.cu
#include <stdio.h>

__global__ void childKernel()
{
    printf("Hello ");
}

__global__ void parentKernel()
{
    // launch child
    childKernel<<<1, 1>>>();
    if (cudaSuccess != cudaGetLastError()) {
        return;
    }

    // wait for child to complete
    if (cudaSuccess != cudaDeviceSynchronize()) {
        return;
    }

    printf("World!\n");
}

int main(int argc, char *argv[])
{
    // launch parent
    parentKernel<<<1, 1>>>();
    if (cudaSuccess != cudaGetLastError()) {
        return 1;
    }

    // wait for parent to complete
    if (cudaSuccess != cudaDeviceSynchronize()) {
        return 2;
    }

    return 0;
}
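// Note on building: the nested childKernel launch inside parentKernel uses CUDA
// dynamic parallelism, which needs a compute-capability 3.5+ target and
// relocatable device code. A typical command line (assuming nvcc; the output
// name is arbitrary) would look roughly like:
//
//   nvcc -arch=sm_35 -rdc=true ce14dc8c7c3022578e1247e58b4c0f84b2e79b1e.cu -o parent_child -lcudadevrt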
368cf66d070c1cef8fd50ef76df1d93a65e5c61c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cstdlib>
#include <ctime>

#define n 10

void random_matrix(int *array);
void print_matrix(int *array);
__global__ void filter(int *in, int *out);

int main(){
    int *in;
    int *out;
    int *dev_in, *dev_out;
    int size = n * n * sizeof(int);   // size in bytes of an n x n int matrix

    hipMalloc((void **)&dev_in, size);
    hipMalloc((void **)&dev_out, size);

    // host buffers hold n*n ints ("size" is already in bytes, not an element count)
    in = new int[n * n];
    out = new int[n * n];

    srand(time(nullptr));
    random_matrix(in);

    printf("\nIn:\n");
    print_matrix(in);

    hipMemcpy(dev_in, in, size, hipMemcpyHostToDevice);
    // the kernel overwrites every element of dev_out, so this upload is not strictly needed
    hipMemcpy(dev_out, out, size, hipMemcpyHostToDevice);

    int numOfBlocks = 1;
    dim3 threadsPerBlock(n, n);
    hipLaunchKernelGGL(filter, dim3(numOfBlocks), threadsPerBlock, 0, 0, dev_in, dev_out);

    hipMemcpy(out, dev_out, size, hipMemcpyDeviceToHost);

    printf("\nOut:\n");
    print_matrix(out);

    hipFree(dev_in);
    hipFree(dev_out);
    delete[] in;
    delete[] out;

    int h;
    scanf("%d", &h);   // pause before exit
    return 0;
}

void random_matrix(int *array){
    for (auto i = 0; i < n; ++i){
        for (auto j = 0; j < n; ++j){
            array[i * n + j] = rand() % 10;
        }
    }
}

void print_matrix(int *array){
    for (auto i = 0; i < n; ++i){
        for (auto j = 0; j < n; ++j){
            printf("%d ", array[i * n + j]);
        }
        printf("\n");
    }
}

// averages the neighborhood of each element (up to 3x3), excluding the element itself
__global__ void filter(int *in, int *out)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int start_i = 0, start_j = 0, sum = 0, end_i = n - 1, end_j = n - 1;
    if (i > 0) { start_i = i - 1; }
    if (j > 0) { start_j = j - 1; }
    if (i < n - 1) { end_i = i + 1; }
    if (j < n - 1) { end_j = j + 1; }
    int no_elements = 0;
    for (auto ir = start_i; ir <= end_i; ++ir) {
        for (auto ic = start_j; ic <= end_j; ++ic) {
            if (ic != j || ir != i) {
                sum += in[ir * n + ic];
                no_elements++;
            }
        }
    }
    out[i * n + j] = sum / no_elements;
}
368cf66d070c1cef8fd50ef76df1d93a65e5c61c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <ctime> #define n 10 void random_matrix(int *array); void print_matrix(int *array); __global__ void filter(int *in, int *out); int main(){ int *in; int *out; int *dev_in, *dev_out; int size = n * n * sizeof(int); cudaMalloc((void **)&dev_in, size); cudaMalloc((void **)&dev_out, size); in = new int[size]; out = new int[size]; srand(time(nullptr)); random_matrix(in); printf("\In:\n"); print_matrix(in); cudaMemcpy(dev_in, in, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_out, out, size, cudaMemcpyHostToDevice); int numOfBlocks = 1; dim3 threadsPerBlock(n, n); filter << < numOfBlocks, threadsPerBlock >> > (dev_in, dev_out); cudaMemcpy(out, dev_out, size, cudaMemcpyDeviceToHost); printf("\Out: \n"); print_matrix(out); cudaFree(dev_in); cudaFree(dev_out); free(in); free(out); int h; scanf("%d",&h); return 0; } void random_matrix(int *array){ for (auto i = 0; i < n; ++i){ for (auto j = 0; j < n; ++j){ array[i * n + j] = rand() % 10; } } } void print_matrix(int *array){ for (auto i = 0; i < n; ++i){ for (auto j = 0; j < n; ++j){ printf("%d ", array[i * n + j]); } printf("\n"); } } __global__ void filter(int *in, int *out) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int start_i = 0, start_j = 0, sum = 0, end_i = n - 1, end_j = n - 1; if (i > 0) { start_i = i - 1; } if (j > 0) { start_j = j - 1; } if (i < n - 1) { end_i = i + 1; } if (j < n - 1) { end_j = j + 1; } int no_elements = 0; for (auto ir = start_i; ir <= end_i; ++ir) { for (auto ic = start_j; ic <= end_j; ++ic) { if (ic != j || ir != i) { sum += in[ir * n + ic]; no_elements++; } } } out[i * n + j] = sum / no_elements; }
8259d4fb5ab2d709cb0d9d49a5aa9935f4977df3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) #define BLOCK_SIZE 512 #define WARP_SIZE 32 #define NUM_WARPS (BLOCK_SIZE/WARP_SIZE) // Maximum number of elements that can be inserted into a block queue #define BQ_CAPACITY 2048 // Maximum number of elements that can be inserted into a warp queue #define WQ_CAPACITY 128 /****************************************************************************** GPU kernels *******************************************************************************/ __global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE // Loop over all nodes in the curent level. //since can't assume we have more threads than numCurrLevelNodes, int nThreads=blockDim.x*gridDim.x; int nSteps = (*numCurrLevelNodes-1)/nThreads+1; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; for (int iStep=0; iStep<nSteps; iStep++){ if (idx<*numCurrLevelNodes){ unsigned int node = currLevelNodes[idx]; for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){ unsigned int neighbor = nodeNeighbors[nbrIdx]; //See if neighbor has been visited yet. If not, add it to the queue. //This needs to be atomic op to avoid race conditions between the read and modify. int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value if(!isVisited) { // Already marked, add it to the queue int iNextLevel = atomicAdd(numNextLevelNodes,1); nextLevelNodes[iNextLevel] = neighbor; } } } idx += nThreads; } } __global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE // Loop over all nodes in the curent level. __shared__ int nextLevelNodes_s[BQ_CAPACITY]; __shared__ int numNextLevelNodes_s; //can't initialize shared vars if (threadIdx.x==0){ //dunno if it matters if all threads modify numNextLevelNodes_s=0; } __syncthreads(); int nThreads=blockDim.x*gridDim.x; int nSteps = (*numCurrLevelNodes-1)/nThreads+1; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; for (int iStep=0; iStep<nSteps; iStep++){ if (idx<*numCurrLevelNodes){ unsigned int node = currLevelNodes[idx]; for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){ unsigned int neighbor = nodeNeighbors[nbrIdx]; //See if neighbor has been visited yet. If not, add it to the queue. //This needs to be atomic op to avoid race conditions between the read and modify. 
int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value if(!isVisited) { // Already marked it, try to add to the shared queue but //have to deal w/ overflow int iNextLevel = atomicAdd(&numNextLevelNodes_s,1); //candidate index into shared array if (iNextLevel>=BQ_CAPACITY){ //no room in block's shared space //add to global queue iNextLevel = atomicAdd(numNextLevelNodes,1); nextLevelNodes[iNextLevel] = neighbor; } else { //room in block's shared space //add to block's queue nextLevelNodes_s[iNextLevel] = neighbor; } } } } idx += nThreads; } //now insert block's queue into global queue __syncthreads(); //reserve block's space in global queue __shared__ int iStartGlobal; if (threadIdx.x==0){ numNextLevelNodes_s = MIN(numNextLevelNodes_s, BQ_CAPACITY); iStartGlobal = atomicAdd(numNextLevelNodes,numNextLevelNodes_s); } __syncthreads(); //fill in global queue collaboratively nSteps = (numNextLevelNodes_s-1)/blockDim.x+1; for (unsigned int iStep=0; iStep<nSteps; iStep++){ idx = iStep*blockDim.x+threadIdx.x; if (idx<numNextLevelNodes_s){ nextLevelNodes[idx+iStartGlobal]=nextLevelNodes_s[idx]; } } } __global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE } /****************************************************************************** Functions *******************************************************************************/ void cpu_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // Loop over all nodes in the curent level for(unsigned int idx = 0; idx < *numCurrLevelNodes; ++idx) { unsigned int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) { unsigned int neighbor = nodeNeighbors[nbrIdx]; // If the neighbor hasn't been visited yet if(!nodeVisited[neighbor]) { // Mark it and add it to the queue nodeVisited[neighbor] = 1; nextLevelNodes[*numNextLevelNodes] = neighbor; ++(*numNextLevelNodes); } } } } void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; hipLaunchKernelGGL(( gpu_global_queuing_kernel) , dim3(numBlocks) , dim3(BLOCK_SIZE) , 0, 0, nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; hipLaunchKernelGGL(( gpu_block_queuing_kernel) , dim3(numBlocks) , dim3(BLOCK_SIZE) , 0, 0, nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; 
hipLaunchKernelGGL(( gpu_warp_queuing_kernel) , dim3(numBlocks) , dim3(BLOCK_SIZE) , 0, 0, nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); }
8259d4fb5ab2d709cb0d9d49a5aa9935f4977df3.cu
/****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y)) #define BLOCK_SIZE 512 #define WARP_SIZE 32 #define NUM_WARPS (BLOCK_SIZE/WARP_SIZE) // Maximum number of elements that can be inserted into a block queue #define BQ_CAPACITY 2048 // Maximum number of elements that can be inserted into a warp queue #define WQ_CAPACITY 128 /****************************************************************************** GPU kernels *******************************************************************************/ __global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE // Loop over all nodes in the curent level. //since can't assume we have more threads than numCurrLevelNodes, int nThreads=blockDim.x*gridDim.x; int nSteps = (*numCurrLevelNodes-1)/nThreads+1; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; for (int iStep=0; iStep<nSteps; iStep++){ if (idx<*numCurrLevelNodes){ unsigned int node = currLevelNodes[idx]; for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){ unsigned int neighbor = nodeNeighbors[nbrIdx]; //See if neighbor has been visited yet. If not, add it to the queue. //This needs to be atomic op to avoid race conditions between the read and modify. int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value if(!isVisited) { // Already marked, add it to the queue int iNextLevel = atomicAdd(numNextLevelNodes,1); nextLevelNodes[iNextLevel] = neighbor; } } } idx += nThreads; } } __global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE // Loop over all nodes in the curent level. __shared__ int nextLevelNodes_s[BQ_CAPACITY]; __shared__ int numNextLevelNodes_s; //can't initialize shared vars if (threadIdx.x==0){ //dunno if it matters if all threads modify numNextLevelNodes_s=0; } __syncthreads(); int nThreads=blockDim.x*gridDim.x; int nSteps = (*numCurrLevelNodes-1)/nThreads+1; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; for (int iStep=0; iStep<nSteps; iStep++){ if (idx<*numCurrLevelNodes){ unsigned int node = currLevelNodes[idx]; for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){ unsigned int neighbor = nodeNeighbors[nbrIdx]; //See if neighbor has been visited yet. If not, add it to the queue. //This needs to be atomic op to avoid race conditions between the read and modify. 
int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value if(!isVisited) { // Already marked it, try to add to the shared queue but //have to deal w/ overflow int iNextLevel = atomicAdd(&numNextLevelNodes_s,1); //candidate index into shared array if (iNextLevel>=BQ_CAPACITY){ //no room in block's shared space //add to global queue iNextLevel = atomicAdd(numNextLevelNodes,1); nextLevelNodes[iNextLevel] = neighbor; } else { //room in block's shared space //add to block's queue nextLevelNodes_s[iNextLevel] = neighbor; } } } } idx += nThreads; } //now insert block's queue into global queue __syncthreads(); //reserve block's space in global queue __shared__ int iStartGlobal; if (threadIdx.x==0){ numNextLevelNodes_s = MIN(numNextLevelNodes_s, BQ_CAPACITY); iStartGlobal = atomicAdd(numNextLevelNodes,numNextLevelNodes_s); } __syncthreads(); //fill in global queue collaboratively nSteps = (numNextLevelNodes_s-1)/blockDim.x+1; for (unsigned int iStep=0; iStep<nSteps; iStep++){ idx = iStep*blockDim.x+threadIdx.x; if (idx<numNextLevelNodes_s){ nextLevelNodes[idx+iStartGlobal]=nextLevelNodes_s[idx]; } } } __global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // INSERT KERNEL CODE HERE } /****************************************************************************** Functions *******************************************************************************/ void cpu_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { // Loop over all nodes in the curent level for(unsigned int idx = 0; idx < *numCurrLevelNodes; ++idx) { unsigned int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) { unsigned int neighbor = nodeNeighbors[nbrIdx]; // If the neighbor hasn't been visited yet if(!nodeVisited[neighbor]) { // Mark it and add it to the queue nodeVisited[neighbor] = 1; nextLevelNodes[*numNextLevelNodes] = neighbor; ++(*numNextLevelNodes); } } } } void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; gpu_global_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; gpu_block_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors, unsigned int *nodeVisited, unsigned int *currLevelNodes, unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) { const unsigned int numBlocks = 45; gpu_warp_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs, nodeNeighbors, 
nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); }
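// --- Editor's note: hedged sketch, not part of the original dataset file above ---
// gpu_warp_queuing_kernel in the file above is left as a stub ("INSERT KERNEL CODE HERE").
// The kernel below is a minimal illustrative sketch under the same signature and the
// WARP_SIZE / NUM_WARPS / WQ_CAPACITY constants defined in that file. For brevity,
// warp-queue overflow spills directly into the global queue instead of cascading through
// the block queue; the names gpu_warp_queuing_kernel_sketch, warpQueue_s, numWarpQueue_s
// and warpStartGlobal are hypothetical.
__global__ void gpu_warp_queuing_kernel_sketch(unsigned int *nodePtrs,
    unsigned int *nodeNeighbors, unsigned int *nodeVisited,
    unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
    unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  // One private queue per warp in shared memory.
  __shared__ unsigned int warpQueue_s[NUM_WARPS][WQ_CAPACITY];
  __shared__ unsigned int numWarpQueue_s[NUM_WARPS];
  const int warpId = threadIdx.x / WARP_SIZE;
  const int lane   = threadIdx.x % WARP_SIZE;
  if (lane == 0) numWarpQueue_s[warpId] = 0;
  __syncthreads();

  // Grid-stride loop over the current level (same traversal as the global-queue kernel).
  const int nThreads = blockDim.x * gridDim.x;
  for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < *numCurrLevelNodes; idx += nThreads) {
    const unsigned int node = currLevelNodes[idx];
    for (unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) {
      const unsigned int neighbor = nodeNeighbors[nbrIdx];
      // atomicAdd returns the old value, so 0 means "not visited yet".
      if (atomicAdd(&nodeVisited[neighbor], 1) == 0) {
        const unsigned int wIdx = atomicAdd(&numWarpQueue_s[warpId], 1);
        if (wIdx < WQ_CAPACITY) {
          warpQueue_s[warpId][wIdx] = neighbor;      // room in this warp's queue
        } else {
          const unsigned int gIdx = atomicAdd(numNextLevelNodes, 1);
          nextLevelNodes[gIdx] = neighbor;           // overflow: fall back to the global queue
        }
      }
    }
  }
  __syncthreads();

  // Flush each warp's queue into the global queue.
  const unsigned int wCount = min(numWarpQueue_s[warpId], (unsigned int)WQ_CAPACITY);
  __shared__ unsigned int warpStartGlobal[NUM_WARPS];
  if (lane == 0)
    warpStartGlobal[warpId] = atomicAdd(numNextLevelNodes, wCount);
  __syncthreads();
  for (unsigned int i = lane; i < wCount; i += WARP_SIZE)
    nextLevelNodes[warpStartGlobal[warpId] + i] = warpQueue_s[warpId][i];
}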
b2f99afa167fe0eacd78584839fe7f68cb3eb52c.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ >= 8 #include <cassert> #include <hip/hip_runtime.h> #include "scope/scope.hpp" #include "args.hpp" #define NAME "Comm_Prefetch_Duplex_GPUGPU" auto Comm_Prefetch_Duplex_GPUGPU = [](benchmark::State &state, const int gpu0, const int gpu1) { if (gpu0 == gpu1) { state.SkipWithError(NAME " requires two different GPUs"); return; } const size_t pageSize = page_size(); const auto bytes = 1ULL << static_cast<size_t>(state.range(0)); hipStream_t streams[2]; char *ptrs[2] = {nullptr}; // start and end events. stop0 will not be recorded until after stop1 hipEvent_t start = nullptr; hipEvent_t stop0 = nullptr; hipEvent_t stop1 = nullptr; #define INIT(dev) \ OR_SKIP_AND_RETURN(scope::cuda_reset_device(gpu##dev), ""); \ OR_SKIP_AND_RETURN(hipSetDevice(gpu##dev), ""); \ OR_SKIP_AND_RETURN(hipStreamCreate(&streams[dev]), ""); \ OR_SKIP_AND_RETURN(hipMallocManaged(&ptrs[dev], bytes), ""); \ OR_SKIP_AND_RETURN(hipMemset(ptrs[dev], 0, bytes), "") INIT(0); INIT(1); // record the "pimary" events in the stream associated with gpu0 OR_SKIP_AND_RETURN(hipSetDevice(gpu0), ""); OR_SKIP_AND_RETURN(hipEventCreate(&start), "") OR_SKIP_AND_RETURN(hipEventCreate(&stop0), "") // record the end of the transfer task running on gpu1 OR_SKIP_AND_RETURN(hipSetDevice(gpu1), ""); OR_SKIP_AND_RETURN(hipEventCreate(&stop1), "") for (auto _ : state) { // prefetch data to src and sync OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[0], bytes, gpu1, streams[0]), ""); OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[1], bytes, gpu0, streams[1]), ""); OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[0]), ""); OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[1]), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(hipEventRecord(start, streams[0]), ""); OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[0], bytes, gpu0, streams[0]), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu1), ""); OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[1], bytes, gpu1, streams[1]), ""); OR_SKIP_AND_BREAK(hipEventRecord(stop1, streams[1]), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(hipStreamWaitEvent(streams[0], stop1, 0 /*must be 0*/), ""); OR_SKIP_AND_BREAK(hipEventRecord(stop0, streams[0]), ""); OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[0]), ""); float millis = 0; OR_SKIP_AND_BREAK(hipEventElapsedTime(&millis, start, stop0), ""); state.SetIterationTime(millis / 1000); } state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes) * 2); state.counters["bytes"] = bytes; state.counters["gpu0"] = gpu0; state.counters["gpu1"] = gpu1; OR_SKIP_AND_RETURN(hipEventDestroy(start), ""); OR_SKIP_AND_RETURN(hipEventDestroy(stop0), ""); OR_SKIP_AND_RETURN(hipEventDestroy(stop1), ""); for (auto s : streams) { OR_SKIP_AND_RETURN(hipStreamDestroy(s), ""); } for (auto p : ptrs) { OR_SKIP_AND_RETURN(hipFree(p), ""); } }; static void registerer() { const std::vector<Device> cudas = scope::system::cuda_devices(); for (int i : cudas) { for (int j : cudas) { if (i < j) { std::string name = std::string(NAME) + "/" + std::to_string(i) + "/" + std::to_string(j); benchmark::RegisterBenchmark(name.c_str(), Comm_Prefetch_Duplex_GPUGPU, i, j) ->SMALL_ARGS() ->UseManualTime(); } } } } SCOPE_AFTER_INIT(registerer, NAME); #endif // __CUDACC_VER_MAJOR__ >= 8
b2f99afa167fe0eacd78584839fe7f68cb3eb52c.cu
#if __CUDACC_VER_MAJOR__ >= 8 #include <cassert> #include <cuda_runtime.h> #include "scope/scope.hpp" #include "args.hpp" #define NAME "Comm_Prefetch_Duplex_GPUGPU" auto Comm_Prefetch_Duplex_GPUGPU = [](benchmark::State &state, const int gpu0, const int gpu1) { if (gpu0 == gpu1) { state.SkipWithError(NAME " requires two different GPUs"); return; } const size_t pageSize = page_size(); const auto bytes = 1ULL << static_cast<size_t>(state.range(0)); cudaStream_t streams[2]; char *ptrs[2] = {nullptr}; // start and end events. stop0 will not be recorded until after stop1 cudaEvent_t start = nullptr; cudaEvent_t stop0 = nullptr; cudaEvent_t stop1 = nullptr; #define INIT(dev) \ OR_SKIP_AND_RETURN(scope::cuda_reset_device(gpu##dev), ""); \ OR_SKIP_AND_RETURN(cudaSetDevice(gpu##dev), ""); \ OR_SKIP_AND_RETURN(cudaStreamCreate(&streams[dev]), ""); \ OR_SKIP_AND_RETURN(cudaMallocManaged(&ptrs[dev], bytes), ""); \ OR_SKIP_AND_RETURN(cudaMemset(ptrs[dev], 0, bytes), "") INIT(0); INIT(1); // record the "pimary" events in the stream associated with gpu0 OR_SKIP_AND_RETURN(cudaSetDevice(gpu0), ""); OR_SKIP_AND_RETURN(cudaEventCreate(&start), "") OR_SKIP_AND_RETURN(cudaEventCreate(&stop0), "") // record the end of the transfer task running on gpu1 OR_SKIP_AND_RETURN(cudaSetDevice(gpu1), ""); OR_SKIP_AND_RETURN(cudaEventCreate(&stop1), "") for (auto _ : state) { // prefetch data to src and sync OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[0], bytes, gpu1, streams[0]), ""); OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[1], bytes, gpu0, streams[1]), ""); OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[0]), ""); OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[1]), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(cudaEventRecord(start, streams[0]), ""); OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[0], bytes, gpu0, streams[0]), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu1), ""); OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[1], bytes, gpu1, streams[1]), ""); OR_SKIP_AND_BREAK(cudaEventRecord(stop1, streams[1]), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(cudaStreamWaitEvent(streams[0], stop1, 0 /*must be 0*/), ""); OR_SKIP_AND_BREAK(cudaEventRecord(stop0, streams[0]), ""); OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[0]), ""); float millis = 0; OR_SKIP_AND_BREAK(cudaEventElapsedTime(&millis, start, stop0), ""); state.SetIterationTime(millis / 1000); } state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes) * 2); state.counters["bytes"] = bytes; state.counters["gpu0"] = gpu0; state.counters["gpu1"] = gpu1; OR_SKIP_AND_RETURN(cudaEventDestroy(start), ""); OR_SKIP_AND_RETURN(cudaEventDestroy(stop0), ""); OR_SKIP_AND_RETURN(cudaEventDestroy(stop1), ""); for (auto s : streams) { OR_SKIP_AND_RETURN(cudaStreamDestroy(s), ""); } for (auto p : ptrs) { OR_SKIP_AND_RETURN(cudaFree(p), ""); } }; static void registerer() { const std::vector<Device> cudas = scope::system::cuda_devices(); for (int i : cudas) { for (int j : cudas) { if (i < j) { std::string name = std::string(NAME) + "/" + std::to_string(i) + "/" + std::to_string(j); benchmark::RegisterBenchmark(name.c_str(), Comm_Prefetch_Duplex_GPUGPU, i, j) ->SMALL_ARGS() ->UseManualTime(); } } } } SCOPE_AFTER_INIT(registerer, NAME); #endif // __CUDACC_VER_MAJOR__ >= 8
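// --- Editor's note: hedged sketch, not part of the original dataset file above ---
// The Comm_Prefetch_Duplex_GPUGPU benchmark above times two concurrent prefetches with a
// single event pair: stop1 is recorded on gpu1's stream and gpu0's stream waits on it
// (cudaStreamWaitEvent) before recording stop0, so elapsed(start, stop0) spans both
// directions. The fragment below isolates that ordering trick with plain async copies;
// the function name and buffer arguments are illustrative only.
#include <cuda_runtime.h>
#include <cstdio>

static void duplex_timing_sketch(char *dst0, const char *src0,
                                 char *dst1, const char *src1, size_t bytes) {
  cudaStream_t s[2];
  cudaEvent_t start, stop0, stop1;
  cudaStreamCreate(&s[0]);  cudaStreamCreate(&s[1]);
  cudaEventCreate(&start);  cudaEventCreate(&stop0);  cudaEventCreate(&stop1);

  cudaEventRecord(start, s[0]);                        // timing starts on stream 0
  cudaMemcpyAsync(dst0, src0, bytes, cudaMemcpyDefault, s[0]);
  cudaMemcpyAsync(dst1, src1, bytes, cudaMemcpyDefault, s[1]);
  cudaEventRecord(stop1, s[1]);                        // end of stream 1's work
  cudaStreamWaitEvent(s[0], stop1, 0);                 // stream 0 waits for stream 1
  cudaEventRecord(stop0, s[0]);                        // so stop0 covers both transfers
  cudaStreamSynchronize(s[0]);

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop0);             // wall time of the duplex transfer
  printf("duplex transfer took %f ms\n", ms);

  cudaEventDestroy(start); cudaEventDestroy(stop0); cudaEventDestroy(stop1);
  cudaStreamDestroy(s[0]); cudaStreamDestroy(s[1]);
}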
b505c8f0421e238ebbac2e6f4cec2b34e40543be.hip
// !!! This is a file automatically generated by hipify!!! #include "AlignmentMatch.cuh" #define gpuErrchk(code) { \ if(code != hipSuccess) { \ printf("GPUassert: %s at function %s on file %s line %d\n", hipGetErrorString(code), __FUNCTION__, __FILE__, __LINE__); \ exit(code); \ } \ } const char AlignmentMatch::conversion[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 20, 4, 3, 6, 13, 7, 8, 9, -1, 11, 10, 12, 2, -1, 14, 5, 1, 15, 16, -1, 19, 17, 22, 18, 21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; const unsigned char AlignmentMatch::revConversion[128] = { 65, 82, 78, 68, 67, 81, 69, 71, 72, 73, 76, 75, 77, 70, 80, 83, 84, 87, 89, 86, 66, 90, 88, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; inline bool compare(seqEntry* first, seqEntry* second) { return (first->getEstimatedComplexity() < second->getEstimatedComplexity()); } TexVariablesAddresses AlignmentMatch::copySeqsToTex(int startSeqs1No, int startSeqs2No) { TexVariablesAddresses result; //the size of allocated memory for texSeqs1 and the size of allocated memory for texSeqs2 int size1, size2; char *firstSeqDev, *secondSeqDev; std::string firstSeqHost, secondSeqHost; //COPYING ARRAYS OF SEQUENCES STARTS INTO CONST MEMORY gpuErrchk(hipMemcpyToSymbol(tex1Starts, &starts[startSeqs1No], sizeof(int))); gpuErrchk(hipMemcpyToSymbol(tex2Starts, &starts[startSeqs2No], sizeof(int))); //COPYING X SEQUENCES TO TEXTURE firstSeqHost = seq[starts[startSeqs1No]]; size1 = lengths[startSeqs1No];//length of all sequences within the window gpuErrchk(hipMalloc((void**) &firstSeqDev, sizeof(char) * size1)); gpuErrchk(hipMemcpy(firstSeqDev, firstSeqHost.c_str(), sizeof(char) * size1, hipMemcpyHostToDevice)); gpuErrchk(hipBindTexture(0, texSeqsX, firstSeqDev, size1)); //COPYING Y SEQUENCES TO TEXTURE secondSeqHost = seq[starts[startSeqs2No]]; size2 = lengths[startSeqs2No]; gpuErrchk(hipMalloc((void**) &secondSeqDev, sizeof(char) * size2)); gpuErrchk(hipMemcpy(secondSeqDev, secondSeqHost.c_str(), sizeof(char) * size2, hipMemcpyHostToDevice)); gpuErrchk(hipBindTexture(0, texSeqsY, secondSeqDev, size2)); result.texSeqs1DevPtr = firstSeqDev; result.texSeqs2DevPtr = secondSeqDev; return result; } AlignmentMatch::AlignmentMatch(int gapOpen, int gapExt, unsigned int wSize, int maxSeqLen) { int deviceCount; unsigned int memoryOffset; hipDeviceProp_t deviceProp; // This function call returns 0 if there are no CUDA capable devices. 
hipGetDeviceCount(&deviceCount); if(deviceCount == 0) { fprintf(stderr, "There are no available device(s) that support CUDA\n"); exit(0); } hipGetDeviceProperties(&deviceProp, 0); maxMultiprocessorCount = deviceProp.multiProcessorCount; pairWise = true; windowSize = wSize; maxSeqLength = maxSeqLen; memoryOffset = windowSize * windowSize; blockShape = ALIGNMENT_BLOCK_SHAPE; gpuErrchk(hipMemcpyToSymbol(gapOp, &gapOpen, sizeof(char))); gpuErrchk(hipMemcpyToSymbol(gapEx, &gapExt, sizeof(char))); gpuErrchk(hipMemcpyToSymbol(window, &windowSize, sizeof(unsigned int))); gpuErrchk(hipMemcpyToSymbol(offset, &memoryOffset, sizeof(unsigned int))); } AlignmentMatch::~AlignmentMatch() { } void AlignmentMatch::prepare(std::vector< std::string >& seqs, int myProc) { char tmp; std::string actualSeq; int i, seqNo, seqLength; for (seqNo = 0; seqNo < seqs.size(); seqNo++) { actualSeq = seqs[seqNo]; seqLength = actualSeq.size(); for (i = 0; i < seqLength / 2; i++) { tmp = conversion[(unsigned char) actualSeq[i]]; actualSeq[i] = conversion[(unsigned char) actualSeq[seqLength - 1 - i]]; actualSeq[seqLength - 1 - i] = tmp; } } } void AlignmentMatch::computeAlignment(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { computePairwise(aSeqs, seqs, myProc); computeMSA(aSeqs, seqs, myProc); } void AlignmentMatch::computePairwise(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { short2 *AF; unsigned int *back; unsigned int backSize; unsigned int *matchesSeqXDevPtr, *matchesSeqYDevPtr; //this array will have 4 characters packed in one int unsigned int *outMatchesSeqXDevPtr, *outMatchesSeqYDevPtr; //this array will have 4 characters packed in one int int *scoresDevPtr; TexVariablesAddresses addr; int startSeqs1No, startSeqs2No, maxSeqLengthAlignedTo4; int i, j, height, offset, numberOfAlignments, windowsNumber; dim3 block(blockShape, blockShape); dim3 reorderGrid((windowSize * windowSize) / blockShape); dim3 grid(((windowSize - 1) / blockShape + 1), ((windowSize - 1) / blockShape + 1)); int partId = 1; std::string seq; seqEntry *params; std::vector< seqEntry* > jobs; seq.clear(); starts.clear(); starts.push_back(0); numberOfAlignments = j = 0; for(i = 0; i < aSeqs.size(); i++) { // Search for pairwise alignment that can be done now. 
if(aSeqs[i].vecSize == 2) { //if (myProc == 0) //printf("aSeqs[%d].vecSize: %d iSpecies[0]: %d iSpecies[1]: %d\n", i, aSeqs[i].vecSize, aSeqs[i].iSpecies[0], aSeqs[i].iSpecies[1]); aSeqs[i].aligned = true; seq.append(seqs[aSeqs[i].iSpecies[0]]); lengths.push_back(seqs[aSeqs[i].iSpecies[0]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[0]].size() + starts[j++]); seq.append(seqs[aSeqs[i].iSpecies[1]]); lengths.push_back(seqs[aSeqs[i].iSpecies[1]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[1]].size() + starts[j++]); numberOfAlignments++; } } //printf("myProc: %d aSeqs.size: %d pairwise alignments: %d\n", myProc, aSeqs.size() - 3, numberOfAlignments); if(numberOfAlignments) { windowsNumber = (numberOfAlignments - 1) / windowSize + 1; //printf("windowsNumber: %d numberOfAlignments: %d\n", windowsNumber, numberOfAlignments); for(i = windowsNumber - 1; i > 0; i -= 2) { params = new seqEntry; params->windowX = i; params->windowSumX = 0; offset = windowSize * i; params->windowMaxX = lengths[offset]; for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) params->windowSumX += lengths[(offset + j)]; params->windowY = (i - 1); params->windowSumY = 0; offset = windowSize * (i - 1); params->windowMaxY = lengths[offset]; for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) params->windowSumY += lengths[(offset + j)]; params->size = numberOfAlignments; params->partId = partId++; params->blockShape = blockShape; params->windowSize = windowSize; params->maxMultiprocessorCount = maxMultiprocessorCount; jobs.push_back(params); } std::sort(jobs.begin(), jobs.end(), compare); try { //one element in AF matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(hipMalloc(&AF, sizeof(int) * maxSeqLength * windowSize * windowSize)); //sizeof(int) - one element in A matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(hipMalloc(&scoresDevPtr, sizeof(int) * windowSize * windowSize)); for (i = 0; i < jobs.size(); i++) { startSeqs1No = jobs[i]->windowX; startSeqs2No = jobs[i]->windowY; //height of this array must be dividable by 8 (ALIGNMENT_MATCH_Y_STEPS) height = ((maxSeqLength - 1) / 8 + 1) * 8; //8->8, 9->16, 10->16 ... 
backSize = sizeof(unsigned int) * (height + 8); backSize *= (maxSeqLength + 1) * windowSize * (windowSize / ALIGNMENT_MATCH_Y_STEPS); gpuErrchk(hipMalloc(&back, backSize)); //memory for temporary (intermediate) results (alignments/matches) //we need: 2x maxSeqLength * 2 * windowSize * windowSize maxSeqLengthAlignedTo4 = ((maxSeqLength - 1) / 4 + 1) * 4; gpuErrchk(hipMalloc(&matchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); gpuErrchk(hipMalloc(&matchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); //memory for final results (alignments/matches) gpuErrchk(hipMalloc(&outMatchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); gpuErrchk(hipMalloc(&outMatchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); //printf("copySeqsToTex startSeqs1No: %d windowX: %d startSeqs2No: %d windowY: %d\n", startSeqs1No, jobs[i]->windowX, startSeqs2No, jobs[i]->windowY); //copying sequences to texture memory addr = copySeqsToTex(startSeqs1No, startSeqs2No); /*********************************************************************** * KERNEL 1 * * score calculation and "back" matrix fill * ***********************************************************************/ //printf("MatchKernel\n"); //maxSeqLength+1 => +1 because we have to take into account the -1 column MatchKernel(grid, block, AF, back, scoresDevPtr, maxSeqLength + 1, (jobs[i]->windowX == jobs[i]->windowY)); /*********************************************************************** * KERNER 2 * * backtracing - alignment matches generation * ***********************************************************************/ //printf("BacktraceKernel\n"); BacktraceKernel(grid, block, back, maxSeqLength + 1, matchesSeqXDevPtr, matchesSeqYDevPtr, (jobs[i]->windowX == jobs[i]->windowY)); /*********************************************************************** * KERNER 3 * * changing order of the results in GPU memory * ***********************************************************************/ /*********************************************************************** * maxSeqLengthAlignedTo4 * 2 / 4 * * -> * 2 because alignment can be 2x as long as the longest * * sequence * * -> / 4 because we packed chars to int * ***********************************************************************/ //printf("ReorderMatches\n"); ReorderMatches(reorderGrid, block, matchesSeqXDevPtr, outMatchesSeqXDevPtr, matchesSeqYDevPtr, outMatchesSeqYDevPtr, maxSeqLengthAlignedTo4 * 2 / 4); /* gpuErrchk(hipMemcpy(&seqs[jobs[i]->windowX], outMatchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize, hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&seqs[jobs[i]->windowY], outMatchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize, hipMemcpyDeviceToHost)); */ //dealocating memory on GPU gpuErrchk(hipFree(back)); gpuErrchk(hipFree(matchesSeqXDevPtr)); gpuErrchk(hipFree(matchesSeqYDevPtr)); gpuErrchk(hipFree(outMatchesSeqXDevPtr)); gpuErrchk(hipFree(outMatchesSeqYDevPtr)); gpuErrchk(hipFree(addr.texSeqs1DevPtr)); gpuErrchk(hipFree(addr.texSeqs2DevPtr)); } gpuErrchk(hipFree(AF)); gpuErrchk(hipFree(scoresDevPtr)); } catch(std::exception &ex) { printf("Error: %s\n", ex.what()); } } } void AlignmentMatch::computeMSA(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { short2 *AF; unsigned int *back; unsigned int backSize; unsigned int *matchesSeqXDevPtr, *matchesSeqYDevPtr; //this array will 
have 4 characters packed in one int unsigned int *outMatchesSeqXDevPtr, *outMatchesSeqYDevPtr; //this array will have 4 characters packed in one int bool canDoIt; int *scoresDevPtr; TexVariablesAddresses addr; int startSeqs1No, startSeqs2No, maxSeqLengthAlignedTo4; int i, j, k, height, offset, numberOfAlignments, windowsNumber; dim3 block(blockShape, blockShape); dim3 reorderGrid((windowSize * windowSize) / blockShape); dim3 grid(((windowSize - 1) / blockShape + 1), ((windowSize - 1) / blockShape + 1)); int partId = 2; std::string seq; seqEntry *params; std::vector< seqEntry* > jobs; seq.clear(); starts.clear(); starts.push_back(0); numberOfAlignments = 0; //printf("computeMSA\n"); for(i = 0; i < aSeqs.size(); i++) { if(aSeqs[i].vecSize == 0) { canDoIt = true; for(j = 0; j < aSeqs[i].iSpecies.size(); j++) { if((aSeqs[i].iSpecies[j] > aSeqs.size()) || !aSeqs[aSeqs[i].iSpecies[j]].aligned) { canDoIt = false; break; } } if(canDoIt) { aSeqs[i].aligned = true; for(j = 0; j < aSeqs[i].iSpecies.size(); j++) { seq.append(seqs[aSeqs[i].iSpecies[j]]); lengths.push_back(seqs[aSeqs[i].iSpecies[j]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[j]].size() + starts[k++]); seq.append(seqs[aSeqs[i].iSpecies[j]]); lengths.push_back(seqs[aSeqs[i].iSpecies[j]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[j]].size() + starts[k++]); aSeqs[i].vecSize += aSeqs[aSeqs[i].iSpecies[j]].vecSize; } numberOfAlignments++; } } } //printf("myProc: %d MSA alignments: %d\n", myProc, numberOfAlignments); if(numberOfAlignments) { windowsNumber = (numberOfAlignments - 1) / windowSize + 1; //printf("windowsNumber: %d numberOfAlignments: %d\n", windowsNumber, numberOfAlignments); for(i = windowsNumber - 1; i > 0; i -= 2) { params = new seqEntry; params->windowX = i; params->windowSumX = 0; offset = windowSize * i; //params->windowMaxX = lengths[offset]; //for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) //params->windowSumX += lengths[(offset + j)]; params->windowY = (i - 1); params->windowSumY = 0; offset = windowSize * (i - 1); //params->windowMaxY = lengths[offset]; //for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) //params->windowSumY += lengths[(offset + j)]; params->size = numberOfAlignments; params->partId = partId++; params->blockShape = blockShape; params->windowSize = windowSize; params->maxMultiprocessorCount = maxMultiprocessorCount; jobs.push_back(params); } std::sort(jobs.begin(), jobs.end(), compare); //printf("try catch\n"); //one element in AF matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(hipMalloc(&AF, sizeof(int) * maxSeqLength * windowSize * windowSize)); //sizeof(int) - one element in A matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(hipMalloc(&scoresDevPtr, sizeof(int) * windowSize * windowSize)); for(i = 0; i < jobs.size(); i++) { ; } gpuErrchk(hipFree(AF)); gpuErrchk(hipFree(scoresDevPtr)); } }
b505c8f0421e238ebbac2e6f4cec2b34e40543be.cu
#include "AlignmentMatch.cuh" #define gpuErrchk(code) { \ if(code != cudaSuccess) { \ printf("GPUassert: %s at function %s on file %s line %d\n", cudaGetErrorString(code), __FUNCTION__, __FILE__, __LINE__); \ exit(code); \ } \ } const char AlignmentMatch::conversion[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 20, 4, 3, 6, 13, 7, 8, 9, -1, 11, 10, 12, 2, -1, 14, 5, 1, 15, 16, -1, 19, 17, 22, 18, 21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; const unsigned char AlignmentMatch::revConversion[128] = { 65, 82, 78, 68, 67, 81, 69, 71, 72, 73, 76, 75, 77, 70, 80, 83, 84, 87, 89, 86, 66, 90, 88, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; inline bool compare(seqEntry* first, seqEntry* second) { return (first->getEstimatedComplexity() < second->getEstimatedComplexity()); } TexVariablesAddresses AlignmentMatch::copySeqsToTex(int startSeqs1No, int startSeqs2No) { TexVariablesAddresses result; //the size of allocated memory for texSeqs1 and the size of allocated memory for texSeqs2 int size1, size2; char *firstSeqDev, *secondSeqDev; std::string firstSeqHost, secondSeqHost; //COPYING ARRAYS OF SEQUENCES STARTS INTO CONST MEMORY gpuErrchk(cudaMemcpyToSymbol(tex1Starts, &starts[startSeqs1No], sizeof(int))); gpuErrchk(cudaMemcpyToSymbol(tex2Starts, &starts[startSeqs2No], sizeof(int))); //COPYING X SEQUENCES TO TEXTURE firstSeqHost = seq[starts[startSeqs1No]]; size1 = lengths[startSeqs1No];//length of all sequences within the window gpuErrchk(cudaMalloc((void**) &firstSeqDev, sizeof(char) * size1)); gpuErrchk(cudaMemcpy(firstSeqDev, firstSeqHost.c_str(), sizeof(char) * size1, cudaMemcpyHostToDevice)); gpuErrchk(cudaBindTexture(0, texSeqsX, firstSeqDev, size1)); //COPYING Y SEQUENCES TO TEXTURE secondSeqHost = seq[starts[startSeqs2No]]; size2 = lengths[startSeqs2No]; gpuErrchk(cudaMalloc((void**) &secondSeqDev, sizeof(char) * size2)); gpuErrchk(cudaMemcpy(secondSeqDev, secondSeqHost.c_str(), sizeof(char) * size2, cudaMemcpyHostToDevice)); gpuErrchk(cudaBindTexture(0, texSeqsY, secondSeqDev, size2)); result.texSeqs1DevPtr = firstSeqDev; result.texSeqs2DevPtr = secondSeqDev; return result; } AlignmentMatch::AlignmentMatch(int gapOpen, int gapExt, unsigned int wSize, int maxSeqLen) { int deviceCount; unsigned int memoryOffset; cudaDeviceProp deviceProp; // This function call returns 0 if there are no CUDA capable devices. 
cudaGetDeviceCount(&deviceCount); if(deviceCount == 0) { fprintf(stderr, "There are no available device(s) that support CUDA\n"); exit(0); } cudaGetDeviceProperties(&deviceProp, 0); maxMultiprocessorCount = deviceProp.multiProcessorCount; pairWise = true; windowSize = wSize; maxSeqLength = maxSeqLen; memoryOffset = windowSize * windowSize; blockShape = ALIGNMENT_BLOCK_SHAPE; gpuErrchk(cudaMemcpyToSymbol(gapOp, &gapOpen, sizeof(char))); gpuErrchk(cudaMemcpyToSymbol(gapEx, &gapExt, sizeof(char))); gpuErrchk(cudaMemcpyToSymbol(window, &windowSize, sizeof(unsigned int))); gpuErrchk(cudaMemcpyToSymbol(offset, &memoryOffset, sizeof(unsigned int))); } AlignmentMatch::~AlignmentMatch() { } void AlignmentMatch::prepare(std::vector< std::string >& seqs, int myProc) { char tmp; std::string actualSeq; int i, seqNo, seqLength; for (seqNo = 0; seqNo < seqs.size(); seqNo++) { actualSeq = seqs[seqNo]; seqLength = actualSeq.size(); for (i = 0; i < seqLength / 2; i++) { tmp = conversion[(unsigned char) actualSeq[i]]; actualSeq[i] = conversion[(unsigned char) actualSeq[seqLength - 1 - i]]; actualSeq[seqLength - 1 - i] = tmp; } } } void AlignmentMatch::computeAlignment(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { computePairwise(aSeqs, seqs, myProc); computeMSA(aSeqs, seqs, myProc); } void AlignmentMatch::computePairwise(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { short2 *AF; unsigned int *back; unsigned int backSize; unsigned int *matchesSeqXDevPtr, *matchesSeqYDevPtr; //this array will have 4 characters packed in one int unsigned int *outMatchesSeqXDevPtr, *outMatchesSeqYDevPtr; //this array will have 4 characters packed in one int int *scoresDevPtr; TexVariablesAddresses addr; int startSeqs1No, startSeqs2No, maxSeqLengthAlignedTo4; int i, j, height, offset, numberOfAlignments, windowsNumber; dim3 block(blockShape, blockShape); dim3 reorderGrid((windowSize * windowSize) / blockShape); dim3 grid(((windowSize - 1) / blockShape + 1), ((windowSize - 1) / blockShape + 1)); int partId = 1; std::string seq; seqEntry *params; std::vector< seqEntry* > jobs; seq.clear(); starts.clear(); starts.push_back(0); numberOfAlignments = j = 0; for(i = 0; i < aSeqs.size(); i++) { // Search for pairwise alignment that can be done now. 
if(aSeqs[i].vecSize == 2) { //if (myProc == 0) //printf("aSeqs[%d].vecSize: %d iSpecies[0]: %d iSpecies[1]: %d\n", i, aSeqs[i].vecSize, aSeqs[i].iSpecies[0], aSeqs[i].iSpecies[1]); aSeqs[i].aligned = true; seq.append(seqs[aSeqs[i].iSpecies[0]]); lengths.push_back(seqs[aSeqs[i].iSpecies[0]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[0]].size() + starts[j++]); seq.append(seqs[aSeqs[i].iSpecies[1]]); lengths.push_back(seqs[aSeqs[i].iSpecies[1]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[1]].size() + starts[j++]); numberOfAlignments++; } } //printf("myProc: %d aSeqs.size: %d pairwise alignments: %d\n", myProc, aSeqs.size() - 3, numberOfAlignments); if(numberOfAlignments) { windowsNumber = (numberOfAlignments - 1) / windowSize + 1; //printf("windowsNumber: %d numberOfAlignments: %d\n", windowsNumber, numberOfAlignments); for(i = windowsNumber - 1; i > 0; i -= 2) { params = new seqEntry; params->windowX = i; params->windowSumX = 0; offset = windowSize * i; params->windowMaxX = lengths[offset]; for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) params->windowSumX += lengths[(offset + j)]; params->windowY = (i - 1); params->windowSumY = 0; offset = windowSize * (i - 1); params->windowMaxY = lengths[offset]; for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) params->windowSumY += lengths[(offset + j)]; params->size = numberOfAlignments; params->partId = partId++; params->blockShape = blockShape; params->windowSize = windowSize; params->maxMultiprocessorCount = maxMultiprocessorCount; jobs.push_back(params); } std::sort(jobs.begin(), jobs.end(), compare); try { //one element in AF matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(cudaMalloc(&AF, sizeof(int) * maxSeqLength * windowSize * windowSize)); //sizeof(int) - one element in A matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(cudaMalloc(&scoresDevPtr, sizeof(int) * windowSize * windowSize)); for (i = 0; i < jobs.size(); i++) { startSeqs1No = jobs[i]->windowX; startSeqs2No = jobs[i]->windowY; //height of this array must be dividable by 8 (ALIGNMENT_MATCH_Y_STEPS) height = ((maxSeqLength - 1) / 8 + 1) * 8; //8->8, 9->16, 10->16 ... 
backSize = sizeof(unsigned int) * (height + 8); backSize *= (maxSeqLength + 1) * windowSize * (windowSize / ALIGNMENT_MATCH_Y_STEPS); gpuErrchk(cudaMalloc(&back, backSize)); //memory for temporary (intermediate) results (alignments/matches) //we need: 2x maxSeqLength * 2 * windowSize * windowSize maxSeqLengthAlignedTo4 = ((maxSeqLength - 1) / 4 + 1) * 4; gpuErrchk(cudaMalloc(&matchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); gpuErrchk(cudaMalloc(&matchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); //memory for final results (alignments/matches) gpuErrchk(cudaMalloc(&outMatchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); gpuErrchk(cudaMalloc(&outMatchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize)); //printf("copySeqsToTex startSeqs1No: %d windowX: %d startSeqs2No: %d windowY: %d\n", startSeqs1No, jobs[i]->windowX, startSeqs2No, jobs[i]->windowY); //copying sequences to texture memory addr = copySeqsToTex(startSeqs1No, startSeqs2No); /*********************************************************************** * KERNEL 1 * * score calculation and "back" matrix fill * ***********************************************************************/ //printf("MatchKernel\n"); //maxSeqLength+1 => +1 because we have to take into account the -1 column MatchKernel(grid, block, AF, back, scoresDevPtr, maxSeqLength + 1, (jobs[i]->windowX == jobs[i]->windowY)); /*********************************************************************** * KERNER 2 * * backtracing - alignment matches generation * ***********************************************************************/ //printf("BacktraceKernel\n"); BacktraceKernel(grid, block, back, maxSeqLength + 1, matchesSeqXDevPtr, matchesSeqYDevPtr, (jobs[i]->windowX == jobs[i]->windowY)); /*********************************************************************** * KERNER 3 * * changing order of the results in GPU memory * ***********************************************************************/ /*********************************************************************** * maxSeqLengthAlignedTo4 * 2 / 4 * * -> * 2 because alignment can be 2x as long as the longest * * sequence * * -> / 4 because we packed chars to int * ***********************************************************************/ //printf("ReorderMatches\n"); ReorderMatches(reorderGrid, block, matchesSeqXDevPtr, outMatchesSeqXDevPtr, matchesSeqYDevPtr, outMatchesSeqYDevPtr, maxSeqLengthAlignedTo4 * 2 / 4); /* gpuErrchk(cudaMemcpy(&seqs[jobs[i]->windowX], outMatchesSeqXDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize, cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&seqs[jobs[i]->windowY], outMatchesSeqYDevPtr, sizeof(char) * maxSeqLengthAlignedTo4 * 2 * windowSize * windowSize, cudaMemcpyDeviceToHost)); */ //dealocating memory on GPU gpuErrchk(cudaFree(back)); gpuErrchk(cudaFree(matchesSeqXDevPtr)); gpuErrchk(cudaFree(matchesSeqYDevPtr)); gpuErrchk(cudaFree(outMatchesSeqXDevPtr)); gpuErrchk(cudaFree(outMatchesSeqYDevPtr)); gpuErrchk(cudaFree(addr.texSeqs1DevPtr)); gpuErrchk(cudaFree(addr.texSeqs2DevPtr)); } gpuErrchk(cudaFree(AF)); gpuErrchk(cudaFree(scoresDevPtr)); } catch(std::exception &ex) { printf("Error: %s\n", ex.what()); } } } void AlignmentMatch::computeMSA(std::vector< iNode >& aSeqs, std::vector< std::string >& seqs, int myProc) { short2 *AF; unsigned int *back; unsigned int backSize; unsigned int *matchesSeqXDevPtr, *matchesSeqYDevPtr; 
//this array will have 4 characters packed in one int unsigned int *outMatchesSeqXDevPtr, *outMatchesSeqYDevPtr; //this array will have 4 characters packed in one int bool canDoIt; int *scoresDevPtr; TexVariablesAddresses addr; int startSeqs1No, startSeqs2No, maxSeqLengthAlignedTo4; int i, j, k, height, offset, numberOfAlignments, windowsNumber; dim3 block(blockShape, blockShape); dim3 reorderGrid((windowSize * windowSize) / blockShape); dim3 grid(((windowSize - 1) / blockShape + 1), ((windowSize - 1) / blockShape + 1)); int partId = 2; std::string seq; seqEntry *params; std::vector< seqEntry* > jobs; seq.clear(); starts.clear(); starts.push_back(0); numberOfAlignments = 0; //printf("computeMSA\n"); for(i = 0; i < aSeqs.size(); i++) { if(aSeqs[i].vecSize == 0) { canDoIt = true; for(j = 0; j < aSeqs[i].iSpecies.size(); j++) { if((aSeqs[i].iSpecies[j] > aSeqs.size()) || !aSeqs[aSeqs[i].iSpecies[j]].aligned) { canDoIt = false; break; } } if(canDoIt) { aSeqs[i].aligned = true; for(j = 0; j < aSeqs[i].iSpecies.size(); j++) { seq.append(seqs[aSeqs[i].iSpecies[j]]); lengths.push_back(seqs[aSeqs[i].iSpecies[j]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[j]].size() + starts[k++]); seq.append(seqs[aSeqs[i].iSpecies[j]]); lengths.push_back(seqs[aSeqs[i].iSpecies[j]].size()); starts.push_back(seqs[aSeqs[i].iSpecies[j]].size() + starts[k++]); aSeqs[i].vecSize += aSeqs[aSeqs[i].iSpecies[j]].vecSize; } numberOfAlignments++; } } } //printf("myProc: %d MSA alignments: %d\n", myProc, numberOfAlignments); if(numberOfAlignments) { windowsNumber = (numberOfAlignments - 1) / windowSize + 1; //printf("windowsNumber: %d numberOfAlignments: %d\n", windowsNumber, numberOfAlignments); for(i = windowsNumber - 1; i > 0; i -= 2) { params = new seqEntry; params->windowX = i; params->windowSumX = 0; offset = windowSize * i; //params->windowMaxX = lengths[offset]; //for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) //params->windowSumX += lengths[(offset + j)]; params->windowY = (i - 1); params->windowSumY = 0; offset = windowSize * (i - 1); //params->windowMaxY = lengths[offset]; //for(j = 0; (j < windowSize) && (offset + j < numberOfAlignments); j += blockShape) //params->windowSumY += lengths[(offset + j)]; params->size = numberOfAlignments; params->partId = partId++; params->blockShape = blockShape; params->windowSize = windowSize; params->maxMultiprocessorCount = maxMultiprocessorCount; jobs.push_back(params); } std::sort(jobs.begin(), jobs.end(), compare); //printf("try catch\n"); //one element in AF matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(cudaMalloc(&AF, sizeof(int) * maxSeqLength * windowSize * windowSize)); //sizeof(int) - one element in A matrix: // - 2 bytes for element of A matrix // - 2 bytes for element of F matrix gpuErrchk(cudaMalloc(&scoresDevPtr, sizeof(int) * windowSize * windowSize)); for(i = 0; i < jobs.size(); i++) { ; } gpuErrchk(cudaFree(AF)); gpuErrchk(cudaFree(scoresDevPtr)); } }
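// --- Editor's note: hedged sketch, not part of the original dataset file above ---
// Both AlignmentMatch variants store the backtraced alignments as "4 characters packed in
// one int" (see the maxSeqLengthAlignedTo4 * 2 / 4 comment near ReorderMatches). The
// helpers below are a minimal, hypothetical illustration of such a packing scheme; the
// project's real kernels (MatchKernel / BacktraceKernel / ReorderMatches) are declared
// elsewhere and may use a different byte order.
__host__ __device__ inline unsigned int pack4(unsigned char c0, unsigned char c1,
                                              unsigned char c2, unsigned char c3) {
  // c0 lands in the lowest byte of the packed word.
  return (unsigned int)c0 | ((unsigned int)c1 << 8) |
         ((unsigned int)c2 << 16) | ((unsigned int)c3 << 24);
}

__host__ __device__ inline unsigned char unpack4(unsigned int word, int i) {
  // i in [0,3]: extract the i-th packed character.
  return (unsigned char)((word >> (8 * i)) & 0xFFu);
}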
9da7b5376a08e9920da50540cbf619ab233ce3df.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <float.h> #include "blas.h" #include "dark_cuda.h" #include "utils.h" __inline__ __device__ float warpAllReduceSum(float val) { for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) #if CUDART_VERSION >= 9000 val += __shfl_xor_sync(0xffffffff, val, mask); #else val += __shfl_xor(val, mask); #endif return val; } __global__ void compare_2_arrays_kernel(float* one, float* two, int size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= size) return; const float diff = 100 * fabs(one[index] - two[index]) / fabs(one[index]); if (diff > 10) printf(" i: %d - one = %f, two = %f, diff = %f %% \n", index, one[index], two[index], diff); } void compare_2_arrays_gpu(float* one, float* two, int size) { const int num_blocks = get_number_of_blocks(size, BLOCK); hipLaunchKernelGGL(( compare_2_arrays_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), one, two, size); CHECK_CUDA(hipPeekAtLastError()); CHECK_CUDA(hipDeviceSynchronize()); } __global__ void scale_bias_kernel(float* output, float* scale, int batch, int filters, int spatial, int current_size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= current_size) return; int f = (index / spatial) % filters; output[index] *= scale[f]; } void scale_bias_gpu( float* output, float* scale, int batch, int filters, int spatial) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); hipLaunchKernelGGL(( scale_bias_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), output, scale, batch, filters, spatial, current_size); CHECK_CUDA(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float* x_norm, float* delta, int batch, int n, int size, float* scale_updates) { __shared__ float part[BLOCK]; int i, b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for (b = 0; b < batch; ++b) { for (i = 0; i < size; i += BLOCK) { int index = p + i + size * (filter + n * b); sum += (p + i < size) ? 
delta[index] * x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for (i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float* x_norm, float* delta, int batch, int n, int size, float* scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, get_cuda_stream(), x_norm, delta, batch, n, size, scale_updates); CHECK_CUDA(hipPeekAtLastError()); } __global__ void adam_kernel(int N, float* x, float* m, float* v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } void adam_gpu(int n, float* x, float* m, float* v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), n, x, m, v, B1, B2, rate, eps, t); CHECK_CUDA(hipPeekAtLastError()); } void adam_update_gpu(float* w, float* d, float* m, float* v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_ongpu(n, B1, m, 1); scal_ongpu(n, B2, v, 1); axpy_ongpu(n, -decay * batch, w, 1, d, 1); axpy_ongpu(n, (1 - B1), d, 1, m, 1); mul_ongpu(n, d, 1, d, 1); axpy_ongpu(n, (1 - B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_ongpu(n, 0, d, 1); CHECK_CUDA(hipPeekAtLastError()); } __global__ void normalize_kernel(int N, float* x, float* mean, float* variance, int batch, int filters, int spatial) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; x[index] = (x[index] - mean[f]) / (sqrtf(variance[f] + .00001f)); } void normalize_gpu( float* x, float* mean, float* variance, int batch, int filters, int spatial) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); hipLaunchKernelGGL(( normalize_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), current_size, x, mean, variance, batch, filters, spatial); CHECK_CUDA(hipPeekAtLastError()); } __global__ void normalize_delta_kernel(int N, float* x, float* mean, float* variance, float* mean_delta, float* variance_delta, int batch, int filters, int spatial, float* delta) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; delta[index] = delta[index] * 1.F / (sqrtf(variance[f]) + .000001f) + variance_delta[f] * 2. 
* (x[index] - mean[f]) / (spatial * batch) + mean_delta[f] / (spatial * batch); } void normalize_delta_gpu(float* x, float* mean, float* variance, float* mean_delta, float* variance_delta, int batch, int filters, int spatial, float* delta) { size_t N = batch * filters * spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); CHECK_CUDA(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; variance_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; variance_delta[i] += delta[index] * (x[index] - mean[i]); } } variance_delta[i] *= -.5 * powf(variance[i] + .000001f, (float)(-3. / 2.)); } __global__ void accumulate_kernel(float* x, int n, int groups, float* sum) { int k; int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for (k = 0; k < n; ++k) { sum[i] += x[k * groups + i]; } } __global__ void fast_mean_delta_kernel(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? delta[index] : 0; } } __syncthreads(); if (id == 0) { mean_delta[filter] = 0; for (i = 0; i < threads; ++i) { mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.F / sqrtf(variance[filter] + .000001f)); } } __global__ void fast_variance_delta_kernel(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? delta[index] * (x[index] - mean[filter]) : 0; } } __syncthreads(); if (id == 0) { variance_delta[filter] = 0; for (i = 0; i < threads; ++i) { variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3. 
/ 2.)); } } __global__ void mean_delta_kernel(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.F / sqrtf(variance[i] + .000001f)); } void mean_delta_gpu(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream(), delta, variance, batch, filters, spatial, mean_delta); CHECK_CUDA(hipPeekAtLastError()); } void fast_mean_delta_gpu(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream(), delta, variance, batch, filters, spatial, mean_delta); CHECK_CUDA(hipPeekAtLastError()); } void fast_variance_delta_gpu(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream(), x, delta, mean, variance, batch, filters, spatial, variance_delta); CHECK_CUDA(hipPeekAtLastError()); } __global__ void mean_kernel( float* x, int batch, int filters, int spatial, float* mean) { float scale = 1.F / (batch * spatial); int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; mean[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel( float* x, float* mean, int batch, int filters, int spatial, float* variance) { float scale = 1.F / (batch * spatial - 1); int j, k; int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float* x, int w, int h, int c, int batch, int stride, int forward, float* out) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_index = i; int in_w = i % w; i = i / w; int in_h = i % h; i = i / h; int in_c = i % c; i = i / c; int b = i % batch; int out_c = c / (stride * stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w * stride + offset % stride; int h2 = in_h * stride + offset / stride; // printf("%d\n", offset); int out_index = w2 + w * stride * (h2 + h * stride * (c2 + out_c * b)); // printf("%d %d %d\n", w2, h2, c2); // printf("%d %d\n", in_index, out_index); // if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if (forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; // if(forward) out[1] = x[1]; // else out[0] = x[0]; } __global__ void constrain_weight_updates_kernel( int N, float coef, float* weights_gpu, float* weight_updates_gpu) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { const float w = weights_gpu[i]; const float wu = weight_updates_gpu[i]; const float wu_sign = (wu == 0) ? 
0 : (fabs(wu) / wu); const float abs_limit = fabs(w * coef); if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign; } } void constrain_weight_updates_ongpu( int N, float coef, float* weights_gpu, float* weight_updates_gpu) { hipLaunchKernelGGL(( constrain_weight_updates_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, coef, weights_gpu, weight_updates_gpu); CHECK_CUDA(hipPeekAtLastError()); } __global__ void axpy_kernel(int N, float ALPHA, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[OFFY + i * INCY] += ALPHA * X[OFFX + i * INCX]; } __global__ void pow_kernel( int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY] = powf(X[i * INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i * INCX])); } __global__ void constrain_min_max_kernel( int N, float MIN, float MAX, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = fminf(MAX, fmaxf(MIN, X[i * INCX])); } __global__ void supp_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { if ((X[i * INCX] * X[i * INCX]) < (ALPHA * ALPHA)) X[i * INCX] = 0; } } __global__ void scal_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] *= ALPHA; } __global__ void scal_add_kernel( int N, float ALPHA, float BETA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = X[i * INCX] * ALPHA + BETA; } __global__ void fill_kernel(int N, float ALPHA, float* X, int INCX) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; X[index * INCX] = ALPHA; } __global__ void mask_kernel_new_api( int n, float* x, float mask_num, float* mask, float val) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n && mask[i] == mask_num) x[i] = val; } __global__ void mask_kernel(int n, float* x, float mask_num, float* mask) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n && mask[i] == mask_num) x[i] = mask_num; } __global__ void copy_kernel( int N, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY + OFFY] = X[i * INCX + OFFX]; } __global__ void simple_copy_kernel(int size, float* src, float* dst) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) dst[index] = src[index]; } __global__ void mul_kernel(int N, float* X, int INCX, float* Y, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY] *= X[i * INCX]; } __global__ void fast_mean_kernel( float* x, int batch, int filters, int spatial, float* mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int 
i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? x[index] : 0; } } __syncthreads(); if (id == 0) { float mean_tmp = 0; for (i = 0; i < threads; ++i) { mean_tmp += local[i]; } mean_tmp /= spatial * batch; mean[filter] = mean_tmp; } } void fast_mean_gpu(float* x, int batch, int filters, int spatial, float* mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream(), x, batch, filters, spatial, mean); CHECK_CUDA(hipPeekAtLastError()); } __global__ void fast_variance_kernel( float* x, float* mean, int batch, int filters, int spatial, float* variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if (id == 0) { float variance_tmp = 0; for (i = 0; i < threads; ++i) { variance_tmp += local[i]; } variance_tmp /= (spatial * batch); // -1); variance[filter] = variance_tmp; } } void fast_variance_gpu( float* x, float* mean, int batch, int filters, int spatial, float* variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream(), x, mean, batch, filters, spatial, variance); CHECK_CUDA(hipPeekAtLastError()); } __global__ void fast_v_cbn_kernel(const float* x, float* mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float* m_avg, float* v_avg, float* variance, const float alpha, float* rolling_mean_gpu, float* rolling_variance_gpu, int inverse_variance, float epsilon) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? 
powf(x[index], 2) : 0; } } __syncthreads(); if (id == 0) { float v_tmp = 0; v_tmp = 0; for (i = 0; i < threads; ++i) { v_tmp += local[i]; } v_tmp /= (spatial * batch - 1); v_tmp = fmax(v_tmp, powf(mean[filter], 2)); const float alpha_cbn = 1.0f / minibatch_index; m_avg[filter] = alpha_cbn * mean[filter] + (1 - alpha_cbn) * m_avg[filter]; mean[filter] = m_avg[filter]; v_avg[filter] = alpha_cbn * v_tmp + (1 - alpha_cbn) * v_avg[filter]; float variance_tmp = fmax(0.0f, v_avg[filter] - powf(m_avg[filter], 2)); if (inverse_variance) variance[filter] = 1.0f / sqrtf(variance_tmp + epsilon); else variance[filter] = variance_tmp; // if (max_minibatch_index == minibatch_index) { if (rolling_mean_gpu) rolling_mean_gpu[filter] = alpha * mean[filter] + (1 - alpha) * rolling_mean_gpu[filter]; if (rolling_variance_gpu) rolling_variance_gpu[filter] = alpha * variance_tmp + (1 - alpha) * rolling_variance_gpu[filter]; } } } void fast_v_cbn_gpu(const float* x, float* mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float* m_avg, float* v_avg, float* variance, const float alpha, float* rolling_mean_gpu, float* rolling_variance_gpu, int inverse_variance, float epsilon) { hipLaunchKernelGGL(( fast_v_cbn_kernel), dim3(filters), dim3(BLOCK), 0, get_cuda_stream(), x, mean, batch, filters, spatial, minibatch_index, max_minibatch_index, m_avg, v_avg, variance, alpha, rolling_mean_gpu, rolling_variance_gpu, inverse_variance, epsilon); CHECK_CUDA(hipPeekAtLastError()); } __global__ void inverse_variance_kernel( int size, float* src, float* dst, float epsilon) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) dst[index] = 1.0f / sqrtf(src[index] + epsilon); } void inverse_variance_ongpu(int size, float* src, float* dst, float epsilon) { const int num_blocks = size / BLOCK + 1; hipLaunchKernelGGL(( inverse_variance_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), size, src, dst, epsilon); CHECK_CUDA(hipPeekAtLastError()); } __global__ void normalize_scale_bias_kernel(int N, float* x, float* mean, float* variance, float* scales, float* biases, int batch, int filters, int spatial, int inverse_variance, float epsilon) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; float val = 0; if (inverse_variance) val = (x[index] - mean[f]) * variance[f]; else val = (x[index] - mean[f]) / (sqrtf(variance[f] + epsilon)); val *= scales[f]; val += biases[f]; if (!isnan(val) && !isinf(val)) x[index] = val; } void normalize_scale_bias_gpu(float* x, float* mean, float* variance, float* scales, float* biases, int batch, int filters, int spatial, int inverse_variance, float epsilon) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); hipLaunchKernelGGL(( normalize_scale_bias_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), current_size, x, mean, variance, scales, biases, batch, filters, spatial, inverse_variance, epsilon); CHECK_CUDA(hipPeekAtLastError()); } void mean_gpu(float* x, int batch, int filters, int spatial, float* mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream(), x, batch, filters, spatial, mean); CHECK_CUDA(hipPeekAtLastError()); } void variance_gpu( float* x, float* mean, int batch, int filters, int spatial, float* variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, get_cuda_stream(), x, mean, batch, 
filters, spatial, variance); CHECK_CUDA(hipPeekAtLastError()); } void axpy_ongpu(int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } void pow_ongpu(int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX, Y, INCY); CHECK_CUDA(hipPeekAtLastError()); } void axpy_ongpu_offset(int N, float ALPHA, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); CHECK_CUDA(hipPeekAtLastError()); } void copy_ongpu(int N, float* X, int INCX, float* Y, int INCY) { copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY); } void simple_copy_ongpu(int size, float* src, float* dst) { const int num_blocks = size / BLOCK + 1; hipLaunchKernelGGL(( simple_copy_kernel), dim3(num_blocks), dim3(BLOCK), 0, get_cuda_stream(), size, src, dst); CHECK_CUDA(hipPeekAtLastError()); } void memcpy_ongpu(void* dst, void* src, int size_bytes) { CHECK_CUDA(hipMemcpyAsync( dst, src, size_bytes, hipMemcpyDefault, get_cuda_stream())); CHECK_CUDA(hipPeekAtLastError()); } void mul_ongpu(int N, float* X, int INCX, float* Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, X, INCX, Y, INCY); CHECK_CUDA(hipPeekAtLastError()); } void copy_ongpu_offset( int N, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, X, OFFX, INCX, Y, OFFY, INCY); CHECK_CUDA(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float* x, int spatial, int layers, int batch, int forward, float* out) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_s = i % spatial; i = i / spatial; int in_c = i % layers; i = i / layers; int b = i; int i1 = b * layers * spatial + in_c * spatial + in_s; int i2 = b * layers * spatial + in_s * layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } void flatten_ongpu( float* x, int spatial, int layers, int batch, int forward, float* out) { int size = spatial * batch * layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, x, spatial, layers, batch, forward, out); CHECK_CUDA(hipPeekAtLastError()); } void reorg_ongpu(float* x, int w, int h, int c, int batch, int stride, int forward, float* out) { int size = w * h * c * batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, x, w, h, c, batch, stride, forward, out); CHECK_CUDA(hipPeekAtLastError()); } void mask_gpu_new_api(int N, float* X, float mask_num, float* mask, float val) { hipLaunchKernelGGL(( mask_kernel_new_api), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, X, mask_num, mask, val); CHECK_CUDA(hipPeekAtLastError()); } void mask_ongpu(int N, float* X, float mask_num, float* mask) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, X, mask_num, mask); CHECK_CUDA(hipPeekAtLastError()); } void const_ongpu(int N, float ALPHA, float* X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void constrain_ongpu(int N, float ALPHA, float* X, int INCX) { 
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void constrain_min_max_ongpu(int N, float MIN, float MAX, float* X, int INCX) { hipLaunchKernelGGL(( constrain_min_max_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, MIN, MAX, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void scal_ongpu(int N, float ALPHA, float* X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void scal_add_ongpu(int N, float ALPHA, float BETA, float* X, int INCX) { hipLaunchKernelGGL(( scal_add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, BETA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void supp_ongpu(int N, float ALPHA, float* X, int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } void fill_ongpu(int N, float ALPHA, float* X, int INCX) { // fill_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, // INCX); CHECK_CUDA(hipPeekAtLastError()); hipLaunchKernelGGL(( fill_kernel), dim3(get_number_of_blocks(N, BLOCK)), dim3(BLOCK), 0, get_cuda_stream(), N, ALPHA, X, INCX); CHECK_CUDA(hipPeekAtLastError()); } __device__ float relu(float src) { if (src > 0) return src; return 0; } __device__ float lrelu(float src) { const float eps = 0.001; if (src > eps) return src; return eps; } __device__ float grad_relu(float src) { return (src > 0); } __device__ float grad_lrelu(float src) { const float eps = 0.001; return (src > eps); } __global__ void shortcut_singlelayer_simple_kernel(int size, int src_outputs, int* outputs_of_layers_gpu, float** layers_output_gpu, float* out, float* in) { const int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float out_val = in[id]; int add_outputs = outputs_of_layers_gpu[0]; if (src_i < add_outputs) { int add_index = add_outputs * src_b + src_i; float* add = layers_output_gpu[0]; out_val += add[add_index]; } out[id] = out_val; } void ShortcutGpu(int src_outputs, int batch, int* outputs_of_layers_gpu, float** layers_output_gpu, float* out, float* in) { // printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n); int size = batch * src_outputs; hipLaunchKernelGGL(( shortcut_singlelayer_simple_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, src_outputs, outputs_of_layers_gpu, layers_output_gpu, out, in); CHECK_CUDA(hipPeekAtLastError()); } __global__ void backward_shortcut_multilayer_kernel(int size, int src_outputs, int n, int* outputs_of_layers_gpu, float** layers_delta_gpu, float* delta_out, float* delta_in) { const int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; delta_out[id] += delta_in[id]; // layers for (int i = 0; i < n; ++i) { int add_outputs = outputs_of_layers_gpu[i]; if (src_i < add_outputs) { int add_index = add_outputs * src_b + src_i; float* layer_delta = layers_delta_gpu[i]; layer_delta[add_index] += delta_in[id]; } } } void BackwardShortcutGpu(int src_outputs, int batch, int n, int* outputs_of_layers_gpu, float** layers_delta_gpu, float* 
delta_out, float* delta_in) { int size = batch * src_outputs; hipLaunchKernelGGL(( backward_shortcut_multilayer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, src_outputs, n, outputs_of_layers_gpu, layers_delta_gpu, delta_out, delta_in); CHECK_CUDA(hipPeekAtLastError()); } __global__ void smooth_l1_kernel( int n, float* pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { float diff = truth[i] - pred[i]; float abs_val = abs(diff); if (abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2 * abs_val - 1; delta[i] = (diff < 0) ? -1 : 1; } } } void smooth_l1_gpu(int n, float* pred, float* truth, float* delta, float* error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), n, pred, truth, delta, error); CHECK_CUDA(hipPeekAtLastError()); } __global__ void l2_kernel( int n, float* pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { float diff = truth[i] - pred[i]; error[i] = diff * diff; // I know this is technically wrong, deal with it. delta[i] = diff; } } void l2_gpu(int n, float* pred, float* truth, float* delta, float* error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), n, pred, truth, delta, error); CHECK_CUDA(hipPeekAtLastError()); } __global__ void weighted_sum_kernel( int n, float* a, float* b, float* s, float* c) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { c[i] = s[i] * a[i] + (1 - s[i]) * (b ? b[i] : 0); } } void weighted_sum_gpu(float* a, float* b, float* s, int num, float* c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream(), num, a, b, s, c); CHECK_CUDA(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float* a, float* b, float* s, float* da, float* db, float* ds, float* dc) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (da) da[i] += dc[i] * s[i]; db[i] += dc[i] * (1 - s[i]); ds[i] += dc[i] * a[i] + dc[i] * -b[i]; } } void weighted_delta_gpu(float* a, float* b, float* s, float* da, float* db, float* ds, int num, float* dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream(), num, a, b, s, da, db, ds, dc); CHECK_CUDA(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float* a, float* b, float* c) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { c[i] += a[i] * b[i]; } } void mult_add_into_gpu(int num, float* a, float* b, float* c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, get_cuda_stream(), num, a, b, c); CHECK_CUDA(hipPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float* x, int w, int h, int c, int batch, int stride, int forward, float scale, float* out) { size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int out_index = i; int out_w = i % (w * stride); i = i / (w * stride); int out_h = i % (h * stride); i = i / (h * stride); int out_c = i % c; i = i / c; int b = i % batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b * w * h * c + in_c * w * h + in_h * w + in_w; if (forward) out[out_index] += scale * x[in_index]; else atomicAdd(x + in_index, 
scale * out[out_index]); } void upsample_gpu(float* in, int w, int h, int c, int batch, int stride, int forward, float scale, float* out) { size_t size = w * h * c * batch * stride * stride; hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(), size, in, w, h, c, batch, stride, forward, scale, out); CHECK_CUDA(hipPeekAtLastError()); } __global__ void fix_nan_and_inf_kernel(float* input, size_t size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value } } } void fix_nan_and_inf(float* input, size_t size) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( fix_nan_and_inf_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), input, size); CHECK_CUDA(hipPeekAtLastError()); // CHECK_CUDA(hipDeviceSynchronize()); } __global__ void reset_nan_and_inf_kernel(float* input, size_t size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 0; } } } void reset_nan_and_inf(float* input, size_t size) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( reset_nan_and_inf_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), input, size); CHECK_CUDA(hipPeekAtLastError()); // CHECK_CUDA(hipDeviceSynchronize()); } __global__ void is_nan_or_inf_kernel( float* input, size_t size, int* pinned_return) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) *pinned_return = 1; } } int is_nan_or_inf(float* input, size_t size) { int* pinned_return; CHECK_CUDA( hipHostMalloc(&pinned_return, sizeof(int), hipHostRegisterMapped)); *pinned_return = 0; const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( is_nan_or_inf_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), input, size, pinned_return); CHECK_CUDA(hipDeviceSynchronize()); int ret_val = *pinned_return; CHECK_CUDA(hipHostFree(pinned_return)); return ret_val; } __global__ void add_3_arrays_activate_kernel( float* a1, float* a2, float* a3, size_t size, ACTIVATION a, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = 0; val += a1[index]; val += a2[index]; if (a3) val += a3[index]; if (a == LOGISTIC) val = 1.f / (1.f + expf(-val)); else if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1); dst[index] = val; } } void add_3_arrays_activate( float* a1, float* a2, float* a3, size_t size, ACTIVATION a, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); if (a != LOGISTIC && a != TANH) { printf( " add_3_arrays_activate() doesn't support activation %d, it supports " "only LOGISTIC and TANH \n", a); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( add_3_arrays_activate_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), a1, a2, a3, size, a, dst); } __global__ void sum_of_mults_kernel( float* a1, float* a2, float* b1, float* b2, size_t size, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { dst[index] = a1[index] * a2[index] + b1[index] * b2[index]; } } void sum_of_mults( float* a1, float* a2, float* b1, float* b2, size_t 
size, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( sum_of_mults_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), a1, a2, b1, b2, size, dst); } __global__ void activate_and_mult_kernel( float* a1, float* a2, size_t size, ACTIVATION a, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = a1[index]; if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1); dst[index] = val * a2[index]; } } void activate_and_mult( float* a1, float* a2, size_t size, ACTIVATION a, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); if (a != TANH) { printf( " activat_and_mult() doesn't support activation %d, it supports only " "TANH \n", a); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( activate_and_mult_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), a1, a2, size, a, dst); } __global__ void scale_channels_kernel(float* in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float* scales_c, float* out) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size) * channel_size; out[index] = in_w_h_c[index] * scales_c[osd_index]; } else { out[index] = in_w_h_c[index] * scales_c[index / channel_size]; } } } void scale_channels_gpu(float* in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float* scales_c, float* out) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( scale_channels_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), in_w_h_c, size, channel_size, batch_size, scale_wh, scales_c, out); CHECK_CUDA(hipPeekAtLastError()); } __global__ void backward_scale_channels_kernel(float* in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh, float* in_scales_c, float* out_from_delta, float* in_from_output, float* out_state_delta) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size) * channel_size; // out_state_delta[osd_index] += in_w_h_c_delta[index] * // in_from_output[index]; // l.delta * from (should be divided by // channel_size?) atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index] / channel_size); // l.delta * from out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required // here } else { int osd_index = index / channel_size; // out_state_delta[osd_index] += in_w_h_c_delta[index] * // in_from_output[index]; // l.delta * from (should be divided by // channel_size?) 
int warp_id = index / 32; int index_warp_start = warp_id * 32; int osd_index_warp_start = index_warp_start / channel_size; int osd_index_warp_end = (index_warp_start + 31) / channel_size; if (osd_index_warp_start == osd_index_warp_end) // all thread in warp process the same channel { float sum = warpAllReduceSum( in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from if (threadIdx.x % 32 == 0) { atomicAdd(&out_state_delta[osd_index], sum); // out_state_delta[osd_index] += sum; } } else { atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from } out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required // here } } } void backward_scale_channels_gpu(float* in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh, float* in_scales_c, float* out_from_delta, float* in_from_output, float* out_state_delta) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); hipLaunchKernelGGL(( backward_scale_channels_kernel), dim3(num_blocks), dim3(block_size), 0, get_cuda_stream(), in_w_h_c_delta, size, channel_size, batch_size, scale_wh, in_scales_c, out_from_delta, in_from_output, out_state_delta); CHECK_CUDA(hipPeekAtLastError()); }
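A hedged aside on the hipified kernels above: every CUDA triple-chevron launch in that file has been rewritten as a hipLaunchKernelGGL call that takes the kernel, the grid and block sizes as dim3, the dynamic shared-memory size, the stream, and then the kernel arguments. The minimal, self-contained sketch below shows the two launch forms side by side; the names demo_fill_kernel and DEMO_BLOCK are hypothetical and not taken from the file.

#include <hip/hip_runtime.h>

#define DEMO_BLOCK 256

__global__ void demo_fill_kernel(int n, float value, float *x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   /* one element per thread */
    if (i < n) x[i] = value;
}

int main()
{
    const int n = 1 << 20;
    float *d_x = NULL;
    hipMalloc((void **)&d_x, n * sizeof(float));

    const int blocks = (n + DEMO_BLOCK - 1) / DEMO_BLOCK;   /* round-up grid size */

    /* CUDA form:  demo_fill_kernel<<<blocks, DEMO_BLOCK, 0, 0>>>(n, 1.0f, d_x);   */
    /* HIP form produced by hipify (null stream, no dynamic shared memory):        */
    hipLaunchKernelGGL(demo_fill_kernel, dim3(blocks), dim3(DEMO_BLOCK), 0, 0,
                       n, 1.0f, d_x);

    hipDeviceSynchronize();
    hipFree(d_x);
    return 0;
}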
9da7b5376a08e9920da50540cbf619ab233ce3df.cu
#include <assert.h> #include <cublas_v2.h> #include <cuda_runtime.h> #include <curand.h> #include <float.h> #include "blas.h" #include "dark_cuda.h" #include "utils.h" __inline__ __device__ float warpAllReduceSum(float val) { for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) #if CUDART_VERSION >= 9000 val += __shfl_xor_sync(0xffffffff, val, mask); #else val += __shfl_xor(val, mask); #endif return val; } __global__ void compare_2_arrays_kernel(float* one, float* two, int size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= size) return; const float diff = 100 * fabs(one[index] - two[index]) / fabs(one[index]); if (diff > 10) printf(" i: %d - one = %f, two = %f, diff = %f %% \n", index, one[index], two[index], diff); } void compare_2_arrays_gpu(float* one, float* two, int size) { const int num_blocks = get_number_of_blocks(size, BLOCK); compare_2_arrays_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( one, two, size); CHECK_CUDA(cudaPeekAtLastError()); CHECK_CUDA(cudaDeviceSynchronize()); } __global__ void scale_bias_kernel(float* output, float* scale, int batch, int filters, int spatial, int current_size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= current_size) return; int f = (index / spatial) % filters; output[index] *= scale[f]; } void scale_bias_gpu( float* output, float* scale, int batch, int filters, int spatial) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); scale_bias_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( output, scale, batch, filters, spatial, current_size); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float* x_norm, float* delta, int batch, int n, int size, float* scale_updates) { __shared__ float part[BLOCK]; int i, b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for (b = 0; b < batch; ++b) { for (i = 0; i < size; i += BLOCK) { int index = p + i + size * (filter + n * b); sum += (p + i < size) ? 
delta[index] * x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for (i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float* x_norm, float* delta, int batch, int n, int size, float* scale_updates) { backward_scale_kernel<<<n, BLOCK, 0, get_cuda_stream()>>>( x_norm, delta, batch, n, size, scale_updates); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void adam_kernel(int N, float* x, float* m, float* v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } void adam_gpu(int n, float* x, float* m, float* v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>( n, x, m, v, B1, B2, rate, eps, t); CHECK_CUDA(cudaPeekAtLastError()); } void adam_update_gpu(float* w, float* d, float* m, float* v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_ongpu(n, B1, m, 1); scal_ongpu(n, B2, v, 1); axpy_ongpu(n, -decay * batch, w, 1, d, 1); axpy_ongpu(n, (1 - B1), d, 1, m, 1); mul_ongpu(n, d, 1, d, 1); axpy_ongpu(n, (1 - B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_ongpu(n, 0, d, 1); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void normalize_kernel(int N, float* x, float* mean, float* variance, int batch, int filters, int spatial) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; x[index] = (x[index] - mean[f]) / (sqrtf(variance[f] + .00001f)); } void normalize_gpu( float* x, float* mean, float* variance, int batch, int filters, int spatial) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); normalize_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( current_size, x, mean, variance, batch, filters, spatial); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void normalize_delta_kernel(int N, float* x, float* mean, float* variance, float* mean_delta, float* variance_delta, int batch, int filters, int spatial, float* delta) { int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; delta[index] = delta[index] * 1.F / (sqrtf(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f] / (spatial * batch); } void normalize_delta_gpu(float* x, float* mean, float* variance, float* mean_delta, float* variance_delta, int batch, int filters, int spatial, float* delta) { size_t N = batch * filters * spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; variance_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; variance_delta[i] += delta[index] * (x[index] - mean[i]); } } variance_delta[i] *= -.5 * powf(variance[i] + .000001f, (float)(-3. 
/ 2.)); } __global__ void accumulate_kernel(float* x, int n, int groups, float* sum) { int k; int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for (k = 0; k < n; ++k) { sum[i] += x[k * groups + i]; } } __global__ void fast_mean_delta_kernel(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? delta[index] : 0; } } __syncthreads(); if (id == 0) { mean_delta[filter] = 0; for (i = 0; i < threads; ++i) { mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.F / sqrtf(variance[filter] + .000001f)); } } __global__ void fast_variance_delta_kernel(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? delta[index] * (x[index] - mean[filter]) : 0; } } __syncthreads(); if (id == 0) { variance_delta[filter] = 0; for (i = 0; i < threads; ++i) { variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5 * powf(variance[filter] + .000001f, (float)(-3. / 2.)); } } __global__ void mean_delta_kernel(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.F / sqrtf(variance[i] + .000001f)); } void mean_delta_gpu(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream()>>>( delta, variance, batch, filters, spatial, mean_delta); CHECK_CUDA(cudaPeekAtLastError()); } void fast_mean_delta_gpu(float* delta, float* variance, int batch, int filters, int spatial, float* mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>( delta, variance, batch, filters, spatial, mean_delta); CHECK_CUDA(cudaPeekAtLastError()); } void fast_variance_delta_gpu(float* x, float* delta, float* mean, float* variance, int batch, int filters, int spatial, float* variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>( x, delta, mean, variance, batch, filters, spatial, variance_delta); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void mean_kernel( float* x, int batch, int filters, int spatial, float* mean) { float scale = 1.F / (batch * spatial); int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j, k; mean[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel( float* x, float* mean, int batch, int filters, int spatial, float* variance) { float scale = 1.F / (batch * spatial - 1); int j, k; int i = 
(blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j * filters * spatial + i * spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float* x, int w, int h, int c, int batch, int stride, int forward, float* out) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_index = i; int in_w = i % w; i = i / w; int in_h = i % h; i = i / h; int in_c = i % c; i = i / c; int b = i % batch; int out_c = c / (stride * stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w * stride + offset % stride; int h2 = in_h * stride + offset / stride; // printf("%d\n", offset); int out_index = w2 + w * stride * (h2 + h * stride * (c2 + out_c * b)); // printf("%d %d %d\n", w2, h2, c2); // printf("%d %d\n", in_index, out_index); // if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if (forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; // if(forward) out[1] = x[1]; // else out[0] = x[0]; } __global__ void constrain_weight_updates_kernel( int N, float coef, float* weights_gpu, float* weight_updates_gpu) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { const float w = weights_gpu[i]; const float wu = weight_updates_gpu[i]; const float wu_sign = (wu == 0) ? 0 : (fabs(wu) / wu); const float abs_limit = fabs(w * coef); if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign; } } void constrain_weight_updates_ongpu( int N, float coef, float* weights_gpu, float* weight_updates_gpu) { constrain_weight_updates_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, coef, weights_gpu, weight_updates_gpu); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void axpy_kernel(int N, float ALPHA, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[OFFY + i * INCY] += ALPHA * X[OFFX + i * INCX]; } __global__ void pow_kernel( int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY] = powf(X[i * INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i * INCX])); } __global__ void constrain_min_max_kernel( int N, float MIN, float MAX, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] = fminf(MAX, fmaxf(MIN, X[i * INCX])); } __global__ void supp_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { if ((X[i * INCX] * X[i * INCX]) < (ALPHA * ALPHA)) X[i * INCX] = 0; } } __global__ void scal_kernel(int N, float ALPHA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i * INCX] *= ALPHA; } __global__ void scal_add_kernel( int N, float ALPHA, float BETA, float* X, int INCX) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; 
if (i < N) X[i * INCX] = X[i * INCX] * ALPHA + BETA; } __global__ void fill_kernel(int N, float ALPHA, float* X, int INCX) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; X[index * INCX] = ALPHA; } __global__ void mask_kernel_new_api( int n, float* x, float mask_num, float* mask, float val) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n && mask[i] == mask_num) x[i] = val; } __global__ void mask_kernel(int n, float* x, float mask_num, float* mask) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n && mask[i] == mask_num) x[i] = mask_num; } __global__ void copy_kernel( int N, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY + OFFY] = X[i * INCX + OFFX]; } __global__ void simple_copy_kernel(int size, float* src, float* dst) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) dst[index] = src[index]; } __global__ void mul_kernel(int N, float* X, int INCX, float* Y, int INCY) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < N) Y[i * INCY] *= X[i * INCX]; } __global__ void fast_mean_kernel( float* x, int batch, int filters, int spatial, float* mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? x[index] : 0; } } __syncthreads(); if (id == 0) { float mean_tmp = 0; for (i = 0; i < threads; ++i) { mean_tmp += local[i]; } mean_tmp /= spatial * batch; mean[filter] = mean_tmp; } } void fast_mean_gpu(float* x, int batch, int filters, int spatial, float* mean) { fast_mean_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>( x, batch, filters, spatial, mean); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void fast_variance_kernel( float* x, float* mean, int batch, int filters, int spatial, float* variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if (id == 0) { float variance_tmp = 0; for (i = 0; i < threads; ++i) { variance_tmp += local[i]; } variance_tmp /= (spatial * batch); // -1); variance[filter] = variance_tmp; } } void fast_variance_gpu( float* x, float* mean, int batch, int filters, int spatial, float* variance) { fast_variance_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>( x, mean, batch, filters, spatial, variance); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void fast_v_cbn_kernel(const float* x, float* mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float* m_avg, float* v_avg, float* variance, const float alpha, float* rolling_mean_gpu, float* rolling_variance_gpu, int inverse_variance, float epsilon) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for (j = 0; j < batch; ++j) { for (i = 0; i < spatial; i += threads) { int index = j * spatial * filters + filter * spatial + i + id; local[id] += (i + id < spatial) ? 
powf(x[index], 2) : 0; } } __syncthreads(); if (id == 0) { float v_tmp = 0; v_tmp = 0; for (i = 0; i < threads; ++i) { v_tmp += local[i]; } v_tmp /= (spatial * batch - 1); v_tmp = fmax(v_tmp, powf(mean[filter], 2)); const float alpha_cbn = 1.0f / minibatch_index; m_avg[filter] = alpha_cbn * mean[filter] + (1 - alpha_cbn) * m_avg[filter]; mean[filter] = m_avg[filter]; v_avg[filter] = alpha_cbn * v_tmp + (1 - alpha_cbn) * v_avg[filter]; float variance_tmp = fmax(0.0f, v_avg[filter] - powf(m_avg[filter], 2)); if (inverse_variance) variance[filter] = 1.0f / sqrtf(variance_tmp + epsilon); else variance[filter] = variance_tmp; // if (max_minibatch_index == minibatch_index) { if (rolling_mean_gpu) rolling_mean_gpu[filter] = alpha * mean[filter] + (1 - alpha) * rolling_mean_gpu[filter]; if (rolling_variance_gpu) rolling_variance_gpu[filter] = alpha * variance_tmp + (1 - alpha) * rolling_variance_gpu[filter]; } } } void fast_v_cbn_gpu(const float* x, float* mean, int batch, int filters, int spatial, int minibatch_index, int max_minibatch_index, float* m_avg, float* v_avg, float* variance, const float alpha, float* rolling_mean_gpu, float* rolling_variance_gpu, int inverse_variance, float epsilon) { fast_v_cbn_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>(x, mean, batch, filters, spatial, minibatch_index, max_minibatch_index, m_avg, v_avg, variance, alpha, rolling_mean_gpu, rolling_variance_gpu, inverse_variance, epsilon); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void inverse_variance_kernel( int size, float* src, float* dst, float epsilon) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) dst[index] = 1.0f / sqrtf(src[index] + epsilon); } void inverse_variance_ongpu(int size, float* src, float* dst, float epsilon) { const int num_blocks = size / BLOCK + 1; inverse_variance_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( size, src, dst, epsilon); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void normalize_scale_bias_kernel(int N, float* x, float* mean, float* variance, float* scales, float* biases, int batch, int filters, int spatial, int inverse_variance, float epsilon) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index / spatial) % filters; float val = 0; if (inverse_variance) val = (x[index] - mean[f]) * variance[f]; else val = (x[index] - mean[f]) / (sqrtf(variance[f] + epsilon)); val *= scales[f]; val += biases[f]; if (!isnan(val) && !isinf(val)) x[index] = val; } void normalize_scale_bias_gpu(float* x, float* mean, float* variance, float* scales, float* biases, int batch, int filters, int spatial, int inverse_variance, float epsilon) { const int current_size = batch * filters * spatial; const int num_blocks = get_number_of_blocks(current_size, BLOCK); normalize_scale_bias_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( current_size, x, mean, variance, scales, biases, batch, filters, spatial, inverse_variance, epsilon); CHECK_CUDA(cudaPeekAtLastError()); } void mean_gpu(float* x, int batch, int filters, int spatial, float* mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream()>>>( x, batch, filters, spatial, mean); CHECK_CUDA(cudaPeekAtLastError()); } void variance_gpu( float* x, float* mean, int batch, int filters, int spatial, float* variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK, 0, get_cuda_stream()>>>( x, mean, batch, filters, spatial, variance); CHECK_CUDA(cudaPeekAtLastError()); } void axpy_ongpu(int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { 
axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } void pow_ongpu(int N, float ALPHA, float* X, int INCX, float* Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, INCX, Y, INCY); CHECK_CUDA(cudaPeekAtLastError()); } void axpy_ongpu_offset(int N, float ALPHA, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); CHECK_CUDA(cudaPeekAtLastError()); } void copy_ongpu(int N, float* X, int INCX, float* Y, int INCY) { copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY); } void simple_copy_ongpu(int size, float* src, float* dst) { const int num_blocks = size / BLOCK + 1; simple_copy_kernel<<<num_blocks, BLOCK, 0, get_cuda_stream()>>>( size, src, dst); CHECK_CUDA(cudaPeekAtLastError()); } void memcpy_ongpu(void* dst, void* src, int size_bytes) { CHECK_CUDA(cudaMemcpyAsync( dst, src, size_bytes, cudaMemcpyDefault, get_cuda_stream())); CHECK_CUDA(cudaPeekAtLastError()); } void mul_ongpu(int N, float* X, int INCX, float* Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, X, INCX, Y, INCY); CHECK_CUDA(cudaPeekAtLastError()); } void copy_ongpu_offset( int N, float* X, int OFFX, int INCX, float* Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, X, OFFX, INCX, Y, OFFY, INCY); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float* x, int spatial, int layers, int batch, int forward, float* out) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int in_s = i % spatial; i = i / spatial; int in_c = i % layers; i = i / layers; int b = i; int i1 = b * layers * spatial + in_c * spatial + in_s; int i2 = b * layers * spatial + in_s * layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } void flatten_ongpu( float* x, int spatial, int layers, int batch, int forward, float* out) { int size = spatial * batch * layers; flatten_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>( size, x, spatial, layers, batch, forward, out); CHECK_CUDA(cudaPeekAtLastError()); } void reorg_ongpu(float* x, int w, int h, int c, int batch, int stride, int forward, float* out) { int size = w * h * c * batch; reorg_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>( size, x, w, h, c, batch, stride, forward, out); CHECK_CUDA(cudaPeekAtLastError()); } void mask_gpu_new_api(int N, float* X, float mask_num, float* mask, float val) { mask_kernel_new_api<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, X, mask_num, mask, val); CHECK_CUDA(cudaPeekAtLastError()); } void mask_ongpu(int N, float* X, float mask_num, float* mask) { mask_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, X, mask_num, mask); CHECK_CUDA(cudaPeekAtLastError()); } void const_ongpu(int N, float ALPHA, float* X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void constrain_ongpu(int N, float ALPHA, float* X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void constrain_min_max_ongpu(int N, float MIN, float MAX, float* X, int INCX) { constrain_min_max_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, MIN, MAX, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void scal_ongpu(int N, float ALPHA, float* X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK, 
0, get_cuda_stream()>>>( N, ALPHA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void scal_add_ongpu(int N, float ALPHA, float BETA, float* X, int INCX) { scal_add_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, BETA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void supp_ongpu(int N, float ALPHA, float* X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } void fill_ongpu(int N, float ALPHA, float* X, int INCX) { // fill_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, // INCX); CHECK_CUDA(cudaPeekAtLastError()); fill_kernel<<<get_number_of_blocks(N, BLOCK), BLOCK, 0, get_cuda_stream()>>>( N, ALPHA, X, INCX); CHECK_CUDA(cudaPeekAtLastError()); } __device__ float relu(float src) { if (src > 0) return src; return 0; } __device__ float lrelu(float src) { const float eps = 0.001; if (src > eps) return src; return eps; } __device__ float grad_relu(float src) { return (src > 0); } __device__ float grad_lrelu(float src) { const float eps = 0.001; return (src > eps); } __global__ void shortcut_singlelayer_simple_kernel(int size, int src_outputs, int* outputs_of_layers_gpu, float** layers_output_gpu, float* out, float* in) { const int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float out_val = in[id]; int add_outputs = outputs_of_layers_gpu[0]; if (src_i < add_outputs) { int add_index = add_outputs * src_b + src_i; float* add = layers_output_gpu[0]; out_val += add[add_index]; } out[id] = out_val; } void ShortcutGpu(int src_outputs, int batch, int* outputs_of_layers_gpu, float** layers_output_gpu, float* out, float* in) { // printf(" src_outputs = %d, batch = %d, n = %d \n", src_outputs, batch, n); int size = batch * src_outputs; shortcut_singlelayer_simple_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>( size, src_outputs, outputs_of_layers_gpu, layers_output_gpu, out, in); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void backward_shortcut_multilayer_kernel(int size, int src_outputs, int n, int* outputs_of_layers_gpu, float** layers_delta_gpu, float* delta_out, float* delta_in) { const int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; delta_out[id] += delta_in[id]; // layers for (int i = 0; i < n; ++i) { int add_outputs = outputs_of_layers_gpu[i]; if (src_i < add_outputs) { int add_index = add_outputs * src_b + src_i; float* layer_delta = layers_delta_gpu[i]; layer_delta[add_index] += delta_in[id]; } } } void BackwardShortcutGpu(int src_outputs, int batch, int n, int* outputs_of_layers_gpu, float** layers_delta_gpu, float* delta_out, float* delta_in) { int size = batch * src_outputs; backward_shortcut_multilayer_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, src_outputs, n, outputs_of_layers_gpu, layers_delta_gpu, delta_out, delta_in); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel( int n, float* pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { float diff = truth[i] - pred[i]; float abs_val = abs(diff); if (abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2 * abs_val - 1; delta[i] = (diff < 0) ? 
-1 : 1; } } } void smooth_l1_gpu(int n, float* pred, float* truth, float* delta, float* error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>( n, pred, truth, delta, error); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void l2_kernel( int n, float* pred, float* truth, float* delta, float* error) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { float diff = truth[i] - pred[i]; error[i] = diff * diff; // I know this is technically wrong, deal with it. delta[i] = diff; } } void l2_gpu(int n, float* pred, float* truth, float* delta, float* error) { l2_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>( n, pred, truth, delta, error); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel( int n, float* a, float* b, float* s, float* c) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { c[i] = s[i] * a[i] + (1 - s[i]) * (b ? b[i] : 0); } } void weighted_sum_gpu(float* a, float* b, float* s, int num, float* c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream()>>>( num, a, b, s, c); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float* a, float* b, float* s, float* da, float* db, float* ds, float* dc) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { if (da) da[i] += dc[i] * s[i]; db[i] += dc[i] * (1 - s[i]); ds[i] += dc[i] * a[i] + dc[i] * -b[i]; } } void weighted_delta_gpu(float* a, float* b, float* s, float* da, float* db, float* ds, int num, float* dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream()>>>( num, a, b, s, da, db, ds, dc); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float* a, float* b, float* c) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { c[i] += a[i] * b[i]; } } void mult_add_into_gpu(int num, float* a, float* b, float* c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK, 0, get_cuda_stream()>>>( num, a, b, c); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float* x, int w, int h, int c, int batch, int stride, int forward, float scale, float* out) { size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int out_index = i; int out_w = i % (w * stride); i = i / (w * stride); int out_h = i % (h * stride); i = i / (h * stride); int out_c = i % c; i = i / c; int b = i % batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b * w * h * c + in_c * w * h + in_h * w + in_w; if (forward) out[out_index] += scale * x[in_index]; else atomicAdd(x + in_index, scale * out[out_index]); } void upsample_gpu(float* in, int w, int h, int c, int batch, int stride, int forward, float scale, float* out) { size_t size = w * h * c * batch * stride * stride; upsample_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>( size, in, w, h, c, batch, stride, forward, scale, out); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void fix_nan_and_inf_kernel(float* input, size_t size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value } } } void fix_nan_and_inf(float* input, size_t size) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); fix_nan_and_inf_kernel<<<num_blocks, 
block_size, 0, get_cuda_stream()>>>( input, size); CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); } __global__ void reset_nan_and_inf_kernel(float* input, size_t size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 0; } } } void reset_nan_and_inf(float* input, size_t size) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); reset_nan_and_inf_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>( input, size); CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); } __global__ void is_nan_or_inf_kernel( float* input, size_t size, int* pinned_return) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) *pinned_return = 1; } } int is_nan_or_inf(float* input, size_t size) { int* pinned_return; CHECK_CUDA( cudaHostAlloc(&pinned_return, sizeof(int), cudaHostRegisterMapped)); *pinned_return = 0; const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); is_nan_or_inf_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>( input, size, pinned_return); CHECK_CUDA(cudaDeviceSynchronize()); int ret_val = *pinned_return; CHECK_CUDA(cudaFreeHost(pinned_return)); return ret_val; } __global__ void add_3_arrays_activate_kernel( float* a1, float* a2, float* a3, size_t size, ACTIVATION a, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = 0; val += a1[index]; val += a2[index]; if (a3) val += a3[index]; if (a == LOGISTIC) val = 1.f / (1.f + expf(-val)); else if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1); dst[index] = val; } } void add_3_arrays_activate( float* a1, float* a2, float* a3, size_t size, ACTIVATION a, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); if (a != LOGISTIC && a != TANH) { printf( " add_3_arrays_activate() doesn't support activation %d, it supports " "only LOGISTIC and TANH \n", a); exit(EXIT_FAILURE); } add_3_arrays_activate_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>(a1, a2, a3, size, a, dst); } __global__ void sum_of_mults_kernel( float* a1, float* a2, float* b1, float* b2, size_t size, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { dst[index] = a1[index] * a2[index] + b1[index] * b2[index]; } } void sum_of_mults( float* a1, float* a2, float* b1, float* b2, size_t size, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); sum_of_mults_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>( a1, a2, b1, b2, size, dst); } __global__ void activate_and_mult_kernel( float* a1, float* a2, size_t size, ACTIVATION a, float* dst) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { float val = a1[index]; if (a == TANH) val = (2 / (1 + expf(-2 * val)) - 1); dst[index] = val * a2[index]; } } void activate_and_mult( float* a1, float* a2, size_t size, ACTIVATION a, float* dst) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); if (a != TANH) { printf( " activat_and_mult() doesn't support activation %d, it supports only " "TANH \n", a); exit(EXIT_FAILURE); } activate_and_mult_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>( a1, a2, size, a, dst); } __global__ void 
scale_channels_kernel(float* in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float* scales_c, float* out) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size) * channel_size; out[index] = in_w_h_c[index] * scales_c[osd_index]; } else { out[index] = in_w_h_c[index] * scales_c[index / channel_size]; } } } void scale_channels_gpu(float* in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float* scales_c, float* out) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); scale_channels_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>( in_w_h_c, size, channel_size, batch_size, scale_wh, scales_c, out); CHECK_CUDA(cudaPeekAtLastError()); } __global__ void backward_scale_channels_kernel(float* in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh, float* in_scales_c, float* out_from_delta, float* in_from_output, float* out_state_delta) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size) * channel_size; // out_state_delta[osd_index] += in_w_h_c_delta[index] * // in_from_output[index]; // l.delta * from (should be divided by // channel_size?) atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index] / channel_size); // l.delta * from out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required // here } else { int osd_index = index / channel_size; // out_state_delta[osd_index] += in_w_h_c_delta[index] * // in_from_output[index]; // l.delta * from (should be divided by // channel_size?) int warp_id = index / 32; int index_warp_start = warp_id * 32; int osd_index_warp_start = index_warp_start / channel_size; int osd_index_warp_end = (index_warp_start + 31) / channel_size; if (osd_index_warp_start == osd_index_warp_end) // all thread in warp process the same channel { float sum = warpAllReduceSum( in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from if (threadIdx.x % 32 == 0) { atomicAdd(&out_state_delta[osd_index], sum); // out_state_delta[osd_index] += sum; } } else { atomicAdd(&out_state_delta[osd_index], in_w_h_c_delta[index] * in_from_output[index]); // l.delta * from } out_from_delta[index] += in_scales_c[osd_index] * in_w_h_c_delta[index]; // input * l.delta // atomic isn't required // here } } } void backward_scale_channels_gpu(float* in_w_h_c_delta, int size, int channel_size, int batch_size, int scale_wh, float* in_scales_c, float* out_from_delta, float* in_from_output, float* out_state_delta) { const int block_size = BLOCK; const int num_blocks = get_number_of_blocks(size, block_size); backward_scale_channels_kernel<<<num_blocks, block_size, 0, get_cuda_stream()>>>(in_w_h_c_delta, size, channel_size, batch_size, scale_wh, in_scales_c, out_from_delta, in_from_output, out_state_delta); CHECK_CUDA(cudaPeekAtLastError()); }
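A hedged aside on the CUDA source above: its warpAllReduceSum helper uses an XOR-butterfly __shfl_xor_sync so that every lane of a warp ends up holding the warp-wide sum. The standalone device-side sketch below extends that same pattern to a per-block sum; demo_warp_reduce_sum and demo_block_sum_kernel are illustrative names, not part of the file, and the kernel assumes blockDim.x is a multiple of 32.

#include <cuda_runtime.h>

__inline__ __device__ float demo_warp_reduce_sum(float val)
{
    /* Butterfly reduction: after log2(32) steps every lane holds the warp sum. */
    for (int mask = 16; mask > 0; mask >>= 1)
        val += __shfl_xor_sync(0xffffffff, val, mask);
    return val;
}

__global__ void demo_block_sum_kernel(const float *x, int n, float *block_sums)
{
    __shared__ float warp_sums[32];            /* one slot per warp (<= 1024 threads) */
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    float v = (tid < n) ? x[tid] : 0.0f;       /* out-of-range threads contribute 0   */
    v = demo_warp_reduce_sum(v);               /* reduce within each warp             */

    int lane = threadIdx.x % 32;
    int warp = threadIdx.x / 32;
    if (lane == 0) warp_sums[warp] = v;        /* lane 0 publishes its warp's sum     */
    __syncthreads();

    if (warp == 0) {                           /* first warp reduces the warp sums    */
        int nwarps = (blockDim.x + 31) / 32;
        v = (lane < nwarps) ? warp_sums[lane] : 0.0f;
        v = demo_warp_reduce_sum(v);
        if (lane == 0) block_sums[blockIdx.x] = v;
    }
}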
19653efa5742e5a91b584c346a2e0f7d450d617f.hip
// !!! This is a file automatically generated by hipify!!! /** * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ #ifndef NI #define NI 512 //2048 #endif #ifndef NJ #define NJ 512 //2048 #endif /* Thread block dimensions for kernel 1*/ #ifndef DIM_THREAD_BLOCK_KERNEL_1_X #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_1_Y #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 #endif /* Thread block dimensions for kernel 2*/ #ifndef DIM_THREAD_BLOCK_KERNEL_2_X #define DIM_THREAD_BLOCK_KERNEL_2_X 32 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_2_Y #define DIM_THREAD_BLOCK_KERNEL_2_Y 8 #endif /* Thread block dimensions for kernel 3*/ #ifndef DIM_THREAD_BLOCK_KERNEL_3_X #define DIM_THREAD_BLOCK_KERNEL_3_X 256 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_3_Y #define DIM_THREAD_BLOCK_KERNEL_3_Y 1 #endif #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_arrays(DATA_TYPE* data) { int i, j; for (i = 1; i < (NI+1); i++) { for (j = 1; j < (NJ+1); j++) { data[i*(NJ+1) + j] = ((DATA_TYPE) i*j) / NI; } } } void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean) { int i, j, j1,j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (NI+1); j++) { mean[j] = 0.0; for (i = 1; i < (NJ+1); i++) { mean[j] += data[i*(NI+1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ for (i = 1; i < (NJ+1); i++) { for (j = 1; j < (NI+1); j++) { data[i*(NI+1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 1; j1 < (NI+1); j1++) { for (j2 = j1; j2 < (NI+1); j2++) { symmat[j1*(NI+1) + j2] = 0.0; for (i = 1; i < NJ+1; i++) { symmat[j1*(NI+1) + j2] += data[i*(NI+1) + j1] * data[i*(NI+1) + j2]; } symmat[j2*(NI+1) + j1] = symmat[j1*(NI+1) + j2]; } } } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (NI+1); i++) { for (j=1; j < (NJ+1); j++) { if (percentDiff(symmat[i*(NJ+1) + j], symmat_outputFromGpu[i*(NJ+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); hipGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == hipComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); hipSetDevice( gpgpu_device_id ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (NI+1))) { mean[j] = 0.0; int i; for(i = 1; i < (NJ+1); i++) { mean[j] += data[i * (NI+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (NJ+1)) && (j >= 1) && (j < (NI+1))) { data[i * (NI+1) + j] -= mean[j]; } } __global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < (NI+1))) { for (j2 = j1; j2 < (NI+1); j2++) { symmat[j1*(NI+1) + j2] = 0.0; for(i = 1; i < (NJ+1); i++) { symmat[j1 * (NI+1) + j2] += data[i *(NI+1) + j1] * data[i *(NI+1) + j2]; } symmat[j2 * (NI+1) + j1] = symmat[j1 * (NI+1) + j2]; } } } void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean, DATA_TYPE* symmat_outputFromGpu) { hipError_t error; double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; error=hipMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (NI+1) * (NJ+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (NI+1) * (NI+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (NI+1)); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (NI+1) * (NJ+1), hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (NI+1) * (NI+1), hipMemcpyHostToDevice); if (error != hipSuccess) { 
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (NI+1), hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), (size_t)(ceil((float)NJ) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X))); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), 1); // t_start = rtclock(); hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1), 0, 0, mean_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce_kernel), dim3(grid2), dim3(block2), 0, 0, mean_gpu,data_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( covar_kernel), dim3(grid3), dim3(block3), 0, 0, symmat_gpu,data_gpu); hipDeviceSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=hipMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (NI+1) * (NJ+1), hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } hipFree(data_gpu); hipFree(symmat_gpu); hipFree(mean_gpu); } int main() { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; double t_start, t_end; DATA_TYPE* data; DATA_TYPE* symmat; DATA_TYPE* mean; DATA_TYPE* symmat_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. */ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); data = (DATA_TYPE*)malloc((NI+1)*(NJ+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((NI+1)*(NI+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((NI+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((NI+1)*(NI+1)*sizeof(DATA_TYPE)); srand(1); init_arrays(data); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covarianceCuda(data, symmat, mean, symmat_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_arrays(data); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covariance(data, symmat, mean); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(symmat, symmat_outputFromGpu); free(data); free(symmat); free(mean); free(symmat_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
19653efa5742e5a91b584c346a2e0f7d450d617f.cu
/** * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #define GPU_DEVICE 0 /* Problem size */ #ifndef NI #define NI 512 //2048 #endif #ifndef NJ #define NJ 512 //2048 #endif /* Thread block dimensions for kernel 1*/ #ifndef DIM_THREAD_BLOCK_KERNEL_1_X #define DIM_THREAD_BLOCK_KERNEL_1_X 256 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_1_Y #define DIM_THREAD_BLOCK_KERNEL_1_Y 1 #endif /* Thread block dimensions for kernel 2*/ #ifndef DIM_THREAD_BLOCK_KERNEL_2_X #define DIM_THREAD_BLOCK_KERNEL_2_X 32 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_2_Y #define DIM_THREAD_BLOCK_KERNEL_2_Y 8 #endif /* Thread block dimensions for kernel 3*/ #ifndef DIM_THREAD_BLOCK_KERNEL_3_X #define DIM_THREAD_BLOCK_KERNEL_3_X 256 #endif #ifndef DIM_THREAD_BLOCK_KERNEL_3_Y #define DIM_THREAD_BLOCK_KERNEL_3_Y 1 #endif #define sqrt_of_array_cell(x,j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_arrays(DATA_TYPE* data) { int i, j; for (i = 1; i < (NI+1); i++) { for (j = 1; j < (NJ+1); j++) { data[i*(NJ+1) + j] = ((DATA_TYPE) i*j) / NI; } } } void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean) { int i, j, j1,j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (NI+1); j++) { mean[j] = 0.0; for (i = 1; i < (NJ+1); i++) { mean[j] += data[i*(NI+1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ for (i = 1; i < (NJ+1); i++) { for (j = 1; j < (NI+1); j++) { data[i*(NI+1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 1; j1 < (NI+1); j1++) { for (j2 = j1; j2 < (NI+1); j2++) { symmat[j1*(NI+1) + j2] = 0.0; for (i = 1; i < NJ+1; i++) { symmat[j1*(NI+1) + j2] += data[i*(NI+1) + j1] * data[i*(NI+1) + j2]; } symmat[j2*(NI+1) + j1] = symmat[j1*(NI+1) + j2]; } } } void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) { int i,j,fail; fail = 0; for (i=1; i < (NI+1); i++) { for (j=1; j < (NJ+1); j++) { if (percentDiff(symmat[i*(NJ+1) + j], symmat_outputFromGpu[i*(NJ+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); cudaGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == cudaComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); cudaSetDevice( gpgpu_device_id ); } __global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; if ((j >= 1) && (j < (NI+1))) { mean[j] = 0.0; int i; for(i = 1; i < (NJ+1); i++) { mean[j] += data[i * (NI+1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } } __global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data) { int j = blockIdx.x * blockDim.x + threadIdx.x + 1; int i = blockIdx.y * blockDim.y + threadIdx.y + 1; if ((i >= 1) && (i < (NJ+1)) && (j >= 1) && (j < (NI+1))) { data[i * (NI+1) + j] -= mean[j]; } } __global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data) { int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; int i, j2; if ((j1 >= 1) && (j1 < (NI+1))) { for (j2 = j1; j2 < (NI+1); j2++) { symmat[j1*(NI+1) + j2] = 0.0; for(i = 1; i < (NJ+1); i++) { symmat[j1 * (NI+1) + j2] += data[i *(NI+1) + j1] * data[i *(NI+1) + j2]; } symmat[j2 * (NI+1) + j1] = symmat[j1 * (NI+1) + j2]; } } } void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean, DATA_TYPE* symmat_outputFromGpu) { cudaError_t error; double t_start, t_end; DATA_TYPE *data_gpu; DATA_TYPE *mean_gpu; DATA_TYPE *symmat_gpu; error=cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (NI+1) * (NJ+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (NI+1) * (NI+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (NI+1)); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (NI+1) * (NJ+1), cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (NI+1) * (NI+1), cudaMemcpyHostToDevice); if (error != 
cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (NI+1), cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); dim3 grid1((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); dim3 grid2((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), (size_t)(ceil((float)NJ) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X))); dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y); dim3 grid3((size_t)(ceil((float)NI) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), 1); // t_start = rtclock(); mean_kernel<<<grid1, block1>>>(mean_gpu,data_gpu); cudaThreadSynchronize(); reduce_kernel<<<grid2, block2>>>(mean_gpu,data_gpu); cudaThreadSynchronize(); covar_kernel<<<grid3, block3>>>(symmat_gpu,data_gpu); cudaThreadSynchronize(); // t_end = rtclock(); // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); error=cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (NI+1) * (NJ+1), cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } cudaFree(data_gpu); cudaFree(symmat_gpu); cudaFree(mean_gpu); } int main() { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; double t_start, t_end; DATA_TYPE* data; DATA_TYPE* symmat; DATA_TYPE* mean; DATA_TYPE* symmat_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. */ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); data = (DATA_TYPE*)malloc((NI+1)*(NJ+1)*sizeof(DATA_TYPE)); symmat = (DATA_TYPE*)malloc((NI+1)*(NI+1)*sizeof(DATA_TYPE)); mean = (DATA_TYPE*)malloc((NI+1)*sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE*)malloc((NI+1)*(NI+1)*sizeof(DATA_TYPE)); srand(1); init_arrays(data); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covarianceCuda(data, symmat, mean, symmat_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_arrays(data); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { covariance(data, symmat, mean); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(symmat, symmat_outputFromGpu); free(data); free(symmat); free(mean); free(symmat_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
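/*
 * Illustrative sketch (applies to both covariance versions above; it is not a
 * patch from the original suite): the grid setup in covarianceCuda() computes
 * e.g. ceil((float)NI) / (float)DIM_THREAD_BLOCK_KERNEL_1_X, i.e. it rounds NI
 * up before dividing instead of rounding the quotient up, and grid2 reuses
 * DIM_THREAD_BLOCK_KERNEL_2_X for its second dimension where the _2_Y block
 * size appears to be intended. With the default NI = NJ = 512 the first point
 * is masked because the sizes divide evenly, but the second leaves
 * reduce_kernel covering only gridDim.y * DIM_THREAD_BLOCK_KERNEL_2_Y = 128 of
 * the 512 values of the row index i, so most of the data matrix is never
 * mean-centered on the GPU. A hedged correction sketch, reusing the macros
 * defined in the files above:
 */
static inline unsigned int div_up(unsigned int n, unsigned int d)
{
    return (n + d - 1) / d;   /* integer ceil(n / d) */
}

/* ...inside covarianceCuda(), in place of the existing grid declarations: */
dim3 grid1(div_up(NI, DIM_THREAD_BLOCK_KERNEL_1_X), 1);
dim3 grid2(div_up(NI, DIM_THREAD_BLOCK_KERNEL_2_X),
           div_up(NJ, DIM_THREAD_BLOCK_KERNEL_2_Y));
dim3 grid3(div_up(NI, DIM_THREAD_BLOCK_KERNEL_3_X), 1);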
98f7f64a9ef6bc8f202e7ec5f591e6d50a9985a8.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "deltasBatch.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

// Block shapes and matrix sizes swept by the benchmark harness.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            float *inputs = NULL;
            hipMalloc(&inputs, XSIZE*YSIZE);
            float *outputs = NULL;
            hipMalloc(&outputs, XSIZE*YSIZE);
            float *weights = NULL;
            hipMalloc(&weights, XSIZE*YSIZE);
            float *weightsDeltas = NULL;
            hipMalloc(&weightsDeltas, XSIZE*YSIZE);
            int noInputs = 1;
            int inputSize = XSIZE*YSIZE;

            // Round the grid up so it covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // Context creation plus warm-up launches before timing.
            hipFree(0);
            hipLaunchKernelGGL(deltasBatch, dim3(gridBlock), dim3(threadBlock), 0, 0, inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(deltasBatch, dim3(gridBlock), dim3(threadBlock), 0, 0, inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            }

            // Timed loop: 1000 launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(deltasBatch, dim3(gridBlock), dim3(threadBlock), 0, 0, inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
98f7f64a9ef6bc8f202e7ec5f591e6d50a9985a8.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "deltasBatch.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

// Block shapes and matrix sizes swept by the benchmark harness.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            float *inputs = NULL;
            cudaMalloc(&inputs, XSIZE*YSIZE);
            float *outputs = NULL;
            cudaMalloc(&outputs, XSIZE*YSIZE);
            float *weights = NULL;
            cudaMalloc(&weights, XSIZE*YSIZE);
            float *weightsDeltas = NULL;
            cudaMalloc(&weightsDeltas, XSIZE*YSIZE);
            int noInputs = 1;
            int inputSize = XSIZE*YSIZE;

            // Round the grid up so it covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // Context creation plus warm-up launches before timing.
            cudaFree(0);
            deltasBatch<<<gridBlock, threadBlock>>>(inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                deltasBatch<<<gridBlock, threadBlock>>>(inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            }

            // Timed loop: 1000 launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                deltasBatch<<<gridBlock, threadBlock>>>(inputs, outputs, weights, weightsDeltas, noInputs, inputSize);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
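/*
 * Illustrative sketch (not part of the pair above): kernel launches are
 * asynchronous, and the timed loop in both versions reads steady_clock::now()
 * right after enqueueing 1000 launches without synchronizing, so the reported
 * time can reflect launch/queueing overhead rather than kernel execution.
 * One hedged way to time the same loop, reusing the variables of the files
 * above (CUDA form shown; the HIP version would call hipDeviceSynchronize):
 */
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    deltasBatch<<<gridBlock, threadBlock>>>(inputs, outputs, weights,
                                            weightsDeltas, noInputs, inputSize);
}
cudaDeviceSynchronize();   /* wait for all queued launches to finish */
auto end = steady_clock::now();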
284326144135dcb2c9ce5c77f105f5d1e9c145a6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void copyGlobalRow(float *in, float *out, const int nx, const int ny)
{
    unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int j = threadIdx.y + blockDim.y * blockIdx.y;

    if (i < nx && j < ny) {
        out[j*nx + i] = in[j*nx + i];
    }
}
284326144135dcb2c9ce5c77f105f5d1e9c145a6.cu
#include "includes.h" __global__ void copyGlobalRow(float *in, float *out, const int nx, const int ny) { unsigned int i = threadIdx.x+blockDim.x*blockIdx.x; unsigned int j = threadIdx.y+blockDim.y*blockIdx.y; if (i<nx && j<ny) { out[j*nx+i] = in[j*nx+i]; } }
695db3bec0d639713de001f2eca1b1782f7be110.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <helper_cuda.h> #include <helper_functions.h> #define BIN_COUNT 256 #define NUM_RUNS 5 #define NUM_TEST 10.0 #define BYTE_COUNT 25600 #define CHECK_ERR(x) \ if (x != hipSuccess) { \ fprintf(stderr,"%s in %s at line %d\n", \ hipGetErrorString(err),__FILE__,__LINE__); \ exit(-1); \ } __global__ void histogram( unsigned char *buffer,long size,unsigned int *histo ) { __shared__ unsigned int temp[256]; temp[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < size) { atomicAdd( &(temp[buffer[i]]), 1 ); i += stride; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } void print(unsigned int *histo){ int i; for(i=0;i<BIN_COUNT;i++){ printf("%d\t",histo[i]); } } int main(int argc, char *argv[]) { int Overfill = 0; unsigned char * h_data; unsigned int h_histogram[BIN_COUNT]; unsigned char * d_data; unsigned int * d_histogram; unsigned int byteCount = BYTE_COUNT; size_t size; hipError_t err; StopWatchInterface *hTimer = NULL; int iter; sdkCreateTimer(&hTimer); hipDeviceProp_t prop; checkCudaErrors( hipGetDeviceProperties( &prop, 0 ) ); int warps; int blocks = prop.multiProcessorCount; if(Overfill==1){ warps = prop.maxThreadsPerBlock/32; } if(Overfill==0){ int coresPerSM = _ConvertSMVer2Cores(prop.major, prop.minor); warps = coresPerSM/16; //A warp runs on 16 cores } if(Overfill==2){ warps =1; blocks = 1; } int NUM_TASKS = warps * blocks; for(iter =0 ; iter < NUM_RUNS;iter++){ byteCount = byteCount / NUM_TASKS; srand (time(NULL)); size = sizeof(unsigned char) * byteCount; h_data = (unsigned char *) malloc(sizeof(unsigned char) * byteCount); for (unsigned int i = 0; i < byteCount; i++) { h_data[i] = rand() % 256; } sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); int j; for(j=0; j < NUM_TASKS; j++) { err=hipMalloc((void **) &d_data, size); CHECK_ERR(err); err=hipMalloc((void **) &d_histogram, sizeof(unsigned int) * BIN_COUNT); CHECK_ERR(err); err = hipMemcpy(d_data,h_data,size,hipMemcpyHostToDevice); CHECK_ERR(err); err = hipMemcpy(d_histogram,h_histogram,sizeof(unsigned int) * BIN_COUNT, hipMemcpyHostToDevice); CHECK_ERR(err); hipLaunchKernelGGL(( histogram), dim3(blocks),dim3(BIN_COUNT), 0, 0, d_data,byteCount,d_histogram); hipDeviceSynchronize(); //Copy back the results from the device err = hipMemcpy(h_histogram,d_histogram,sizeof(unsigned int) * BIN_COUNT,hipMemcpyDeviceToHost); CHECK_ERR(err); //print(h_histogram); hipFree(d_data); hipFree(d_histogram); } sdkStopTimer(&hTimer); free(h_data); unsigned int problem_size = byteCount * 4 * NUM_TASKS; double dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer);// / NUM_TEST; printf("%u\t%.4f\t%.5f\n", problem_size,(1.0e-6 * (double)problem_size / dAvgSecs), dAvgSecs); byteCount = problem_size /4 * 10; } // Print timing information sdkDeleteTimer(&hTimer); }
695db3bec0d639713de001f2eca1b1782f7be110.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <helper_cuda.h> #include <helper_functions.h> #define BIN_COUNT 256 #define NUM_RUNS 5 #define NUM_TEST 10.0 #define BYTE_COUNT 25600 #define CHECK_ERR(x) \ if (x != cudaSuccess) { \ fprintf(stderr,"%s in %s at line %d\n", \ cudaGetErrorString(err),__FILE__,__LINE__); \ exit(-1); \ } __global__ void histogram( unsigned char *buffer,long size,unsigned int *histo ) { __shared__ unsigned int temp[256]; temp[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while (i < size) { atomicAdd( &(temp[buffer[i]]), 1 ); i += stride; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } void print(unsigned int *histo){ int i; for(i=0;i<BIN_COUNT;i++){ printf("%d\t",histo[i]); } } int main(int argc, char *argv[]) { int Overfill = 0; unsigned char * h_data; unsigned int h_histogram[BIN_COUNT]; unsigned char * d_data; unsigned int * d_histogram; unsigned int byteCount = BYTE_COUNT; size_t size; cudaError_t err; StopWatchInterface *hTimer = NULL; int iter; sdkCreateTimer(&hTimer); cudaDeviceProp prop; checkCudaErrors( cudaGetDeviceProperties( &prop, 0 ) ); int warps; int blocks = prop.multiProcessorCount; if(Overfill==1){ warps = prop.maxThreadsPerBlock/32; } if(Overfill==0){ int coresPerSM = _ConvertSMVer2Cores(prop.major, prop.minor); warps = coresPerSM/16; //A warp runs on 16 cores } if(Overfill==2){ warps =1; blocks = 1; } int NUM_TASKS = warps * blocks; for(iter =0 ; iter < NUM_RUNS;iter++){ byteCount = byteCount / NUM_TASKS; srand (time(NULL)); size = sizeof(unsigned char) * byteCount; h_data = (unsigned char *) malloc(sizeof(unsigned char) * byteCount); for (unsigned int i = 0; i < byteCount; i++) { h_data[i] = rand() % 256; } sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); int j; for(j=0; j < NUM_TASKS; j++) { err=cudaMalloc((void **) &d_data, size); CHECK_ERR(err); err=cudaMalloc((void **) &d_histogram, sizeof(unsigned int) * BIN_COUNT); CHECK_ERR(err); err = cudaMemcpy(d_data,h_data,size,cudaMemcpyHostToDevice); CHECK_ERR(err); err = cudaMemcpy(d_histogram,h_histogram,sizeof(unsigned int) * BIN_COUNT, cudaMemcpyHostToDevice); CHECK_ERR(err); histogram<<<blocks,BIN_COUNT>>>(d_data,byteCount,d_histogram); cudaDeviceSynchronize(); //Copy back the results from the device err = cudaMemcpy(h_histogram,d_histogram,sizeof(unsigned int) * BIN_COUNT,cudaMemcpyDeviceToHost); CHECK_ERR(err); //print(h_histogram); cudaFree(d_data); cudaFree(d_histogram); } sdkStopTimer(&hTimer); free(h_data); unsigned int problem_size = byteCount * 4 * NUM_TASKS; double dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer);// / NUM_TEST; printf("%u\t%.4f\t%.5f\n", problem_size,(1.0e-6 * (double)problem_size / dAvgSecs), dAvgSecs); byteCount = problem_size /4 * 10; } // Print timing information sdkDeleteTimer(&hTimer); }
293cb573f9ad87d6c332382a79351c85b77de0c7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2013-2015 The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013 Frank Ong, Martin Uecker, Pat Virtue, and Mark Murphy * [email protected] */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <assert.h> #include <hip/hip_runtime.h> #include "num/multind.h" #include "dfwavelet_kernels.h" #include "dfwavelet_impl.h" # define _hdev_ __host__ __device__ // _data_t is the interal representation of data_t in CUDA // Must be float2/double2 for data_t=Complex float/double or float/double for data_t=float/double typedef float2 _data_t; // Float2 Operators inline _hdev_ float2 operator+ (float2 z1, float2 z2) { return make_float2 (z1.x + z2.x, z1.y + z2.y); } inline _hdev_ float2 operator- (float2 z1, float2 z2) { return make_float2 (z1.x - z2.x, z1.y - z2.y); } inline _hdev_ float2 operator* (float2 z1, float2 z2) { return make_float2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x); } inline _hdev_ float2 operator* (float2 z1, float alpha) { return make_float2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ float2 operator* (float alpha,float2 z1) { return make_float2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ float2 operator/ (float alpha,float2 z1) { return make_float2 (1.f/z1.x, 1.f/z1.y); } inline _hdev_ void operator+= (float2 &z1, float2 z2) { z1.x += z2.x; z1.y += z2.y; } inline _hdev_ float abs(float2 z1) { return sqrt(z1.x*z1.x + z1.y*z1.y); } // Double2 Operators inline _hdev_ double2 operator+ (double2 z1, double2 z2) { return make_double2 (z1.x + z2.x, z1.y + z2.y); } inline _hdev_ double2 operator- (double2 z1, double2 z2) { return make_double2 (z1.x - z2.x, z1.y - z2.y); } inline _hdev_ double2 operator* (double2 z1, double2 z2) { return make_double2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x); } inline _hdev_ double2 operator* (double2 z1, double alpha) { return make_double2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ double2 operator* (double alpha,double2 z1) { return make_double2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ double2 operator/ (double alpha,double2 z1) { return make_double2 (1.f/z1.x, 1.f/z1.y); } inline _hdev_ void operator+= (double2 &z1, double2 z2) { z1.x += z2.x; z1.y += z2.y; } inline _hdev_ double abs(double2 z1) { return sqrt(z1.x*z1.x + z1.y*z1.y); } /********** Macros ************/ #define cuda(Call) do { \ hipError_t err = cuda ## Call ; \ if (err != hipSuccess){ \ fprintf(stderr, "%s\n", hipGetErrorString(err)); \ throw; \ } \ } while(0) #define cuda_sync() do{ \ cuda (ThreadSynchronize()); \ cuda (GetLastError()); \ } while(0) /********** Macros ************/ #define cuda(Call) do { \ hipError_t err = cuda ## Call ; \ if (err != hipSuccess){ \ fprintf(stderr, "%s\n", hipGetErrorString(err)); \ throw; \ } \ } while(0) #define cuda_sync() do{ \ cuda (ThreadSynchronize()); \ cuda (GetLastError()); \ } while(0) // ############################################################################ // Headers // ############################################################################ static __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int 
filterLen); static __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_dep(_data_t *out,_data_t *Lz,_data_t *Hz,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_row(_data_t *out,_data_t *Ly,_data_t *Hy,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_col(_data_t *out,_data_t *Lx,_data_t *Hx,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_iwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz); static __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd); static __global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax); static __global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3); static __global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3); extern "C" void dffwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* 
in_vx,data_t* in_vy,data_t* in_vz) { assert(plan->use_gpu==2); data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,dev_vx,dev_vy,dev_vz); cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dfiwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn) { assert(plan->use_gpu==2); data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); cuda(Memcpy( dev_wcdf1, in_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_wcdf2, in_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_wcn, in_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); dfiwt3_gpu(plan,dev_vx,dev_vy,dev_vz,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dfsoftthresh_gpuHost(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn) { assert(plan->use_gpu==2); data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); cuda(Memcpy( dev_wcdf1, out_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_wcdf2, out_wcdf2, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_wcn, out_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyHostToDevice )); dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), 
hipMemcpyDeviceToHost )); cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); } extern "C" void dfwavthresh3_gpuHost(struct dfwavelet_plan_s* plan, scalar_t dfthresh,scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_vx,data_t* in_vy,data_t* in_vz) { assert(plan->use_gpu==2); data_t*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), hipMemcpyHostToDevice )); dfwavthresh3_gpu(plan,dfthresh,nthresh,dev_vx,dev_vy,dev_vz,dev_vx,dev_vy,dev_vz); cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), hipMemcpyDeviceToHost )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dffwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz) { circshift_gpu(plan,in_vx); circshift_gpu(plan,in_vy); circshift_gpu(plan,in_vz); long numCoeff, filterLen,*waveSizes; numCoeff = plan->numCoeff; waveSizes = plan->waveSizes; filterLen = plan->filterLen; int numLevels = plan->numLevels; // Cast from generic data_t to device compatible _data_t _data_t* dev_wcdf1 = (_data_t*) out_wcdf1; _data_t* dev_wcdf2 = (_data_t*) out_wcdf2; _data_t* dev_wcn = (_data_t*) out_wcn; _data_t* dev_in_vx = (_data_t*) in_vx; _data_t* dev_in_vy = (_data_t*) in_vy; _data_t* dev_in_vz = (_data_t*) in_vz; _data_t* res = (_data_t*) plan->res; _data_t* dev_temp1,*dev_temp2; cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t) )); // Get dimensions int dx = plan->imSize[0]; int dy = plan->imSize[1]; int dz = plan->imSize[2]; int dxNext = waveSizes[0 + 3*numLevels]; int dyNext = waveSizes[1 + 3*numLevels]; int dzNext = waveSizes[2 + 3*numLevels]; int blockSize = dxNext*dyNext*dzNext; // allocate device memory and copy filters to device scalar_t *dev_filters; cuda(Malloc( (void**)&dev_filters, 4*plan->filterLen*sizeof(scalar_t) )); scalar_t *dev_lod0 = dev_filters + 0*plan->filterLen; scalar_t *dev_hid0 = dev_filters + 1*plan->filterLen; scalar_t *dev_lod1 = dev_filters + 2*plan->filterLen; scalar_t *dev_hid1 = dev_filters + 3*plan->filterLen; cuda(Memcpy( dev_lod0, plan->lod0, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_lod1, plan->lod1, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice )); // Initialize variables and Pointers for FWT int const SHMEM_SIZE = 16384; int const T = 512; int mem, K; dim3 numBlocks, numThreads; // Temp Pointers _data_t *dev_tempLx,*dev_tempHx; dev_tempLx = dev_temp1; dev_tempHx = dev_tempLx + numCoeff/2; _data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy; dev_tempLxLy = dev_temp2; dev_tempHxLy = dev_tempLxLy + numCoeff/4; dev_tempLxHy = dev_tempHxLy + numCoeff/4; dev_tempHxHy = dev_tempLxHy + numCoeff/4; // wcdf1 Pointers _data_t 
*dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx; dev_LxLyLz_df1 = dev_wcdf1; dev_HxLyLz_df1 = dev_LxLyLz_df1 + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_df1 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vx = dev_in_vx; // wcdf2 Pointers _data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy; dev_LxLyLz_df2 = dev_wcdf2; dev_HxLyLz_df2 = dev_LxLyLz_df2 + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_df2 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vy = dev_in_vy; // wcn Pointers _data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz; dev_LxLyLz_n = dev_wcn; dev_HxLyLz_n = dev_LxLyLz_n + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_n += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vz = dev_in_vz; //*****************Loop through levels**************** for (int l = numLevels; l >= 1; --l) { dxNext = waveSizes[0 + 3*l]; dyNext = waveSizes[1 + 3*l]; dzNext = waveSizes[2 + 3*l]; blockSize = dxNext*dyNext*dzNext; // Update Pointers // df1 dev_HxLyLz_df1 = dev_HxLyLz_df1 - 7*blockSize; dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize; dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize; dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize; dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize; dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize; dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize; // df2 dev_HxLyLz_df2 = dev_HxLyLz_df2 - 7*blockSize; dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize; dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize; dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize; dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize; dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize; dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize; // n dev_HxLyLz_n = dev_HxLyLz_n - 7*blockSize; dev_LxHyLz_n = dev_HxLyLz_n + blockSize; dev_HxHyLz_n = dev_LxHyLz_n + blockSize; dev_LxLyHz_n = dev_HxHyLz_n + blockSize; dev_HxLyHz_n = dev_LxLyHz_n + blockSize; dev_LxHyHz_n = dev_HxLyHz_n + blockSize; dev_HxHyHz_n = dev_LxHyHz_n + blockSize; //************WCVX*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempHx,dev_current_vx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, 
dev_LxLyLz_df1,dev_LxLyHz_df1,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_LxHyLz_df1,dev_LxHyHz_df1,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxLyLz_df1,dev_HxLyHz_df1,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxHyLz_df1,dev_HxHyHz_df1,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); //************WCVY*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempHx,dev_current_vy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_LxLyLz_df2,dev_LxLyHz_df2,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_LxHyLz_df2,dev_LxHyHz_df2,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxLyLz_df2,dev_HxLyHz_df2,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxHyLz_df2,dev_HxHyHz_df2,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); //************WCVZ*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempHx,dev_current_vz,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); hipLaunchKernelGGL(( cu_fwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_fwt3df_dep) , 
dim3(numBlocks),dim3(numThreads),mem , 0, dev_LxLyLz_n,dev_LxLyHz_n,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_LxHyLz_n,dev_LxHyHz_n,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxLyLz_n,dev_HxLyHz_n,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); hipLaunchKernelGGL(( cu_fwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_HxHyLz_n,dev_HxHyHz_n,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); //******* Multi ****** int maxInd = 7*blockSize; numThreads = T; numBlocks = (maxInd+numThreads.x-1)/numThreads.x; hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_df1,1.f/res[0],maxInd); hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_df2,1.f/res[1],maxInd); hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_n,1.f/res[2],maxInd); cuda_sync(); //*******Linear Combination****** int t1 = min(dxNext,T); int t2 = T/t1; numBlocks = dim3( (dxNext+t1-1)/t1, (dyNext+t2-1)/t2, dzNext); numThreads = dim3(t1,t2,1); hipLaunchKernelGGL(( cu_fwt3df_LC1) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext); hipLaunchKernelGGL(( cu_fwt3df_LC2) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext); hipLaunchKernelGGL(( cu_fwt3df_LC3) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dxNext,dyNext,dzNext); cuda_sync(); hipLaunchKernelGGL(( cu_fwt3df_LC1_diff) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext); hipLaunchKernelGGL(( cu_fwt3df_LC2_diff) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext); cuda_sync(); dev_current_vx = dev_wcdf1; dev_current_vy = dev_wcdf2; dev_current_vz = dev_wcn; dx = dxNext; dy = dyNext; dz = dzNext; } cuda(Free( dev_filters )); cuda(Free( dev_temp1 )); cuda(Free( dev_temp2 )); circunshift_gpu(plan,in_vx); circunshift_gpu(plan,in_vy); circunshift_gpu(plan,in_vz); } extern "C" void dfiwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn) { long numCoeff, filterLen,*waveSizes; numCoeff = plan->numCoeff; waveSizes = plan->waveSizes; filterLen = plan->filterLen; int numLevels = plan->numLevels; // Cast from generic data_t to device compatible _data_t _data_t* dev_out_vx = (_data_t*)out_vx; _data_t* dev_out_vy = (_data_t*)out_vy; _data_t* dev_out_vz = (_data_t*)out_vz; _data_t* dev_wcdf1 = (_data_t*)in_wcdf1; _data_t* dev_wcdf2 = (_data_t*)in_wcdf2; _data_t* dev_wcn = (_data_t*)in_wcn; _data_t* res = (_data_t*) plan->res; _data_t* dev_temp1, *dev_temp2; cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t)) ); // allocate device memory scalar_t *dev_filters; 
cuda(Malloc( (void**)&dev_filters, 4*(plan->filterLen)*sizeof(scalar_t) )); scalar_t *dev_lor0 = dev_filters + 0*plan->filterLen; scalar_t *dev_hir0 = dev_filters + 1*plan->filterLen; scalar_t *dev_lor1 = dev_filters + 2*plan->filterLen; scalar_t *dev_hir1 = dev_filters + 3*plan->filterLen; cuda(Memcpy( dev_lor0, plan->lor0, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice )); cuda(Memcpy( dev_lor1, plan->lor1, 2*plan->filterLen*sizeof(scalar_t), hipMemcpyHostToDevice )); // Workspace dimensions int dxWork = waveSizes[0 + 3*numLevels]*2-1 + filterLen-1; int dyWork = waveSizes[1 + 3*numLevels]*2-1 + filterLen-1; int dzWork = waveSizes[2 + 3*numLevels]*2-1 + filterLen-1; // Initialize variables and pointers for IWT int const SHMEM_SIZE = 16384; int const T = 512; int mem,K; dim3 numBlocks, numThreads; int dx = waveSizes[0]; int dy = waveSizes[1]; int dz = waveSizes[2]; // Temp Pointers _data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy; dev_tempLxLy = dev_temp1; dev_tempHxLy = dev_tempLxLy + numCoeff/4; dev_tempLxHy = dev_tempHxLy + numCoeff/4; dev_tempHxHy = dev_tempLxHy + numCoeff/4; _data_t *dev_tempLx,*dev_tempHx; dev_tempLx = dev_temp2; dev_tempHx = dev_tempLx + numCoeff/2; // wcdf1 Pointers _data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx; dev_LxLyLz_df1 = dev_wcdf1; dev_HxLyLz_df1 = dev_LxLyLz_df1 + dx*dy*dz; dev_current_vx = dev_LxLyLz_df1; // wcdf2 Pointers _data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy; dev_LxLyLz_df2 = dev_wcdf2; dev_HxLyLz_df2 = dev_LxLyLz_df2 + dx*dy*dz; dev_current_vy = dev_LxLyLz_df2; // wcn Pointers _data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz; dev_LxLyLz_n = dev_wcn; dev_HxLyLz_n = dev_LxLyLz_n + dx*dy*dz; dev_current_vz = dev_LxLyLz_n; for (int level = 1; level < numLevels+1; ++level) { dx = waveSizes[0 + 3*level]; dy = waveSizes[1 + 3*level]; dz = waveSizes[2 + 3*level]; int blockSize = dx*dy*dz; int dxNext = waveSizes[0+3*(level+1)]; int dyNext = waveSizes[1+3*(level+1)]; int dzNext = waveSizes[2+3*(level+1)]; // Calclate Offset dxWork = (2*dx-1 + filterLen-1); dyWork = (2*dy-1 + filterLen-1); dzWork = (2*dz-1 + filterLen-1); int xOffset = (int) floor((dxWork - dxNext) / 2.0); int yOffset = (int) floor((dyWork - dyNext) / 2.0); int zOffset = (int) floor((dzWork - dzNext) / 2.0); // Update Pointers // df1 dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize; dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize; dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize; dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize; dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize; dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize; // df2 dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize; dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize; dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize; dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize; dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize; dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize; // n dev_LxHyLz_n = dev_HxLyLz_n + blockSize; dev_HxHyLz_n = dev_LxHyLz_n + blockSize; dev_LxLyHz_n = dev_HxHyLz_n + blockSize; dev_HxLyHz_n = dev_LxLyHz_n + blockSize; dev_LxHyHz_n = dev_HxLyHz_n + blockSize; dev_HxHyHz_n = dev_LxHyHz_n + blockSize; //*******Linear Combination****** int t1 = min(dxNext,T); int t2 = T/t1; numBlocks = dim3( (dx+t1-1)/t1, (dy+t2-1)/t2, dz); numThreads = dim3(t1,t2,1); 
hipLaunchKernelGGL(( cu_iwt3df_LC1) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz); hipLaunchKernelGGL(( cu_iwt3df_LC2) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz); hipLaunchKernelGGL(( cu_iwt3df_LC3) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dx,dy,dz); cuda_sync(); hipLaunchKernelGGL(( cu_iwt3df_LC1_diff) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz); hipLaunchKernelGGL(( cu_iwt3df_LC2_diff) , dim3(numBlocks),dim3(numThreads) , 0, 0, dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz); cuda_sync(); //******* Multi ****** int maxInd = 7*blockSize; numThreads = T; numBlocks = (maxInd+numThreads.x-1)/numThreads.x; hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_df1,res[0],maxInd); hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_df2,res[1],maxInd); hipLaunchKernelGGL(( cu_mult) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_HxLyLz_n,res[2],maxInd); cuda_sync(); //************WCX************ // Update Pointers if (level==numLevels) dev_current_vx = dev_out_vx; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_LxLyLz_df1,dev_LxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_HxLyLz_df1,dev_HxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxHy,dev_LxHyLz_df1,dev_LxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxHy,dev_HxHyLz_df1,dev_HxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); // IWT Columns K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_current_vx,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); cuda_sync(); 
//************WCY************ // Update Pointers if (level==numLevels) dev_current_vy = dev_out_vy; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_LxLyLz_df2,dev_LxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_HxLyLz_df2,dev_HxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxHy,dev_LxHyLz_df2,dev_LxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxHy,dev_HxHyLz_df2,dev_HxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); cuda_sync(); // IWT Columns K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_current_vy,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); //************WCZ************ // Update Pointers if (level==numLevels) dev_current_vz = dev_out_vz; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxLy,dev_LxLyLz_n,dev_LxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxLy,dev_HxLyLz_n,dev_HxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLxHy,dev_LxHyLz_n,dev_LxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); hipLaunchKernelGGL(( cu_iwt3df_dep) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHxHy,dev_HxHyLz_n,dev_HxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_row) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); hipLaunchKernelGGL(( cu_iwt3df_row) , 
dim3(numBlocks),dim3(numThreads),mem , 0, dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); // IWT Columns K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); hipLaunchKernelGGL(( cu_iwt3df_col) , dim3(numBlocks),dim3(numThreads),mem , 0, dev_current_vz,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); dev_HxLyLz_df1 += 7*blockSize; dev_HxLyLz_df2 += 7*blockSize; dev_HxLyLz_n += 7*blockSize; } cuda(Free( dev_filters )); cuda(Free( dev_temp1 )); cuda(Free( dev_temp2 )); circunshift_gpu(plan,out_vx); circunshift_gpu(plan,out_vy); circunshift_gpu(plan,out_vz); } int rand_lim(int limit) { int divisor = RAND_MAX/(limit+1); int retval; do { retval = rand() / divisor; } while (retval > limit); return retval; } void dfwavelet_new_randshift_gpu (struct dfwavelet_plan_s* plan) { int i; i = rand(); for(i = 0; i < plan->numdims; i++) { // Determine maximum shift value for this dimension int log2dim = 1; while( (1<<log2dim) < plan->imSize[i]) { log2dim++; } int maxShift = 1 << (log2dim-plan->numLevels); if (maxShift > 8) { maxShift = 8; } // Generate random shift value between 0 and maxShift plan->randShift[i] = rand_lim(maxShift); } } extern "C" void dfwavthresh3_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz) { data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(_data_t) )); dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,in_vx,in_vy,in_vz); dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn); dfiwt3_gpu(plan,out_vx,out_vy,out_vz,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); } extern "C" void dfsoftthresh_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn) { assert(plan->use_gpu==1||plan->use_gpu==2); _data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; dev_wcdf1 = (_data_t*) out_wcdf1; dev_wcdf2 = (_data_t*) out_wcdf2; dev_wcn = (_data_t*) out_wcn; int numMax; int const T = 512; dim3 numBlocks, numThreads; numMax = plan->numCoeff-plan->numCoarse; numBlocks = dim3((numMax+T-1)/T,1,1); numThreads = dim3(T,1,1); hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, 0, dev_wcdf1+plan->numCoarse,dfthresh,numMax); hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, 0, dev_wcdf2+plan->numCoarse,dfthresh,numMax); hipLaunchKernelGGL(( cu_soft_thresh) , dim3(numBlocks),dim3(numThreads), 0, 0, dev_wcn+plan->numCoarse,nthresh,numMax); } /********** Aux functions **********/ extern "C" void circshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } _data_t* data = (_data_t*) data_c; // Copy data _data_t* dataCopy; cuda(Malloc((void**)&dataCopy, plan->numPixel*sizeof(_data_t))); cuda(Memcpy(dataCopy, data, plan->numPixel*sizeof(_data_t), hipMemcpyDeviceToDevice)); int T = 512; if (plan->numdims==2) { int dx,dy,r0,r1; dx = plan->imSize[0]; dy = 
plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; hipLaunchKernelGGL(( cu_circshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,dataCopy,dx,dy,1,r0,r1,0); } else if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; hipLaunchKernelGGL(( cu_circshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,dataCopy,dx,dy,dz,r0,r1,r2); } cuda(Free(dataCopy)); } extern "C" void circunshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } _data_t* data = (_data_t*) data_c; // Copy data _data_t* dataCopy; cuda(Malloc((void**)&dataCopy, plan->numPixel*sizeof(_data_t))); cuda(Memcpy(dataCopy, data, plan->numPixel*sizeof(_data_t), hipMemcpyDeviceToDevice)); int T = 512; if (plan->numdims==2) { int dx,dy,r0,r1; dx = plan->imSize[0]; dy = plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; hipLaunchKernelGGL(( cu_circunshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,dataCopy,dx,dy,1,r0,r1,0); } else if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; hipLaunchKernelGGL(( cu_circunshift) , dim3((plan->numPixel+T-1)/T), dim3(T), 0, 0, data,dataCopy,dx,dy,dz,r0,r1,r2); } cuda(Free(dataCopy)); } // ############################################################################ // CUDA function of fwt column convolution // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: Lx, Hx // Input: in, dx, dy, dz, dxNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t cols []; int ti = threadIdx.x; int tj = threadIdx.y; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; if (j>=dy) { return; } // Load Input to Temp Array for (int i = ti; i < dx; i += blockDim.x){ cols[i + tj*dx] = in[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High-Pass Downsample int ind, lessThan, greaThan; for (int i = ti; i < dxNext; i += blockDim.x){ _data_t y = cols[0]-cols[0]; _data_t z = cols[0]-cols[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*i+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dx); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dx-1)*greaThan+ind*(-2*greaThan+1); y += cols[ind + tj*dx] * lod[filterLen-1-f]; z += cols[ind + tj*dx] * hid[filterLen-1-f]; } Lx[i + j*dxNext + k*dxNext*dy] = y; Hx[i + j*dxNext + k*dxNext*dy] = z; } } // ############################################################################ // CUDA function of fwt row convolution. 
Assumes fwt_col() has already been called // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: LxLy, LxHy / HxLy, HxHy // Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t rows []; int const K = blockDim.x; int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x*blockDim.x+threadIdx.x; int k = blockIdx.z*blockDim.z+threadIdx.z; if (i>=dxNext) { return; } for (int j = tj; j < dy; j += blockDim.y){ rows[ti + j*K] = in[i + j*dxNext + k*dxNext*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind, lessThan, greaThan; for (int j = tj; j < dyNext; j += blockDim.y){ _data_t y = rows[0]-rows[0]; _data_t z = rows[0]-rows[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*j+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dy); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dy-1)*greaThan+ind*(-2*greaThan+1); y += rows[ti + ind*K] * lod[filterLen-1-f]; z += rows[ti + ind*K] * hid[filterLen-1-f]; } Ly[i + j*dxNext + k*dxNext*dyNext] = y; Hy[i + j*dxNext + k*dxNext*dyNext] = z; } } // ############################################################################ // CUDA function of fwt depth convolution. Assumes fwt_row() has already been called // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: LxLy, LxHy / HxLy, HxHy // Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t deps []; int const K = blockDim.x; int ti = threadIdx.x; int tk = threadIdx.z; int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if (i>=dxNext) { return; } for (int k = tk; k < dz; k += blockDim.z){ deps[ti + k*K] = in[i + j*dxNext + k*dxNext*dyNext]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind, lessThan, greaThan; for (int k = tk; k < dzNext; k += blockDim.z){ _data_t y = deps[0]-deps[0]; _data_t z = deps[0]-deps[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*k+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dz); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dz-1)*greaThan+ind*(-2*greaThan+1); y += deps[ti + ind*K] * lod[filterLen-1-f]; z += deps[ti + ind*K] * hid[filterLen-1-f]; } Lz[i + j*dxNext + k*dxNext*dyNext] = y; Hz[i + j*dxNext + k*dxNext*dyNext] = z; } } extern "C" __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HLL x = HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxLyLz_n[i+j*dxNext+k*dxNext*dyNext]; HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext] = y; 
HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; yGreatZero = j>0; zGreatZero = k>0; HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] = x + yGreatZero*0.25f*y + zGreatZero*0.25f*z; //LHL x = LxHyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxHyLz_n[i+j*dxNext+k*dxNext*dyNext]; LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; xGreatZero = i>0; zGreatZero = k>0; LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = y + xGreatZero*0.25f*x + zGreatZero*0.25f*z; //LLH x = LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxLyHz_n[i+j*dxNext+k*dxNext*dyNext]; LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = y; LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x; yGreatZero = j>0; xGreatZero = i>0; LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = z + yGreatZero*0.25*y + xGreatZero*0.25*x; } extern "C" __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HLL if (j>0) y = HxLyLz_df1[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; if (k>0) z = HxLyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 0.25*z; //LHL if (i>0) x = LxHyLz_df1[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; if (k>0) z = LxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*x - 0.25*z; //LLH if (j>0) y = LxLyHz_df1[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; if (i>0) x = LxLyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 0.25*x; } extern "C" __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHL x = HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxHyLz_n[i+j*dxNext+k*dxNext*dyNext]; HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x-y); HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; zGreatZero = k>0; HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x+y) + zGreatZero*0.125*z; //HLH x = HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxLyHz_n[i+j*dxNext+k*dxNext*dyNext]; HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z-x); HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = y; yGreatZero = j>0; HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z+x) + yGreatZero*0.125*y; //LHH x = LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxHyHz_n[i+j*dxNext+k*dxNext*dyNext]; LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y-z); LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x; xGreatZero = i>0; LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y+z) + xGreatZero*0.125*x; } extern "C" __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* 
HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHL if (k>0) z = HxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*z; //HLH if (j>0) y = HxLyHz_df2[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*y; //LHH if (i>0) x = LxHyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*x; } extern "C" __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHH x = HxHyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxHyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxHyHz_n[i+j*dxNext+k*dxNext*dyNext]; HxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(-2.0*x+y+z); HxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(2*y-x-z); HxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(x+y+z); } // ############################################################################ // CUDA function of iwt depth convolution. // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: K x 2*dy // Output: Lz/Hz // Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_dep(_data_t *out, _data_t *Lz, _data_t *Hz, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset,int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t deps []; int const K = blockDim.x; int ti = threadIdx.x; int tk = threadIdx.z; int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if (i>=dx){ return; } for (int k = tk; k < dz; k += blockDim.z){ deps[ti + k*K] = Lz[i + j*dx + k*dx*dy]; deps[ti + (k+dz)*K] = Hz[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int k = tk+zOffset; k < dzNext+zOffset; k += blockDim.z){ _data_t y = deps[0]-deps[0]; #pragma unroll for (int f = (k-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (k-(filterLen-1)+f)>>1; if ((ind >= 0) && (ind < dz)) { y += deps[ti + ind*K] * lod[filterLen-1-f]; y += deps[ti + (ind+dz)*K] * hid[filterLen-1-f]; } } out[i + j*dx + (k-zOffset)*dx*dy] = y; } } // ############################################################################ // CUDA function of iwt row convolution. Assumes fwt_col() has already been called. 
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: K x 2*dy // Output: Lx/Hx // Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_row(_data_t *out, _data_t *Ly, _data_t *Hy, int dx, int dy,int dz,int dxNext, int dyNext,int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t rows []; int const K = blockDim.x; int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x*blockDim.x+threadIdx.x; int k = blockIdx.z*blockDim.z+threadIdx.z; if (i>=dx){ return; } for (int j = tj; j < dy; j += blockDim.y){ rows[ti + j*K] = Ly[i + j*dx + k*dx*dy]; rows[ti + (j+dy)*K] = Hy[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int j = tj+yOffset; j < dyNext+yOffset; j += blockDim.y){ _data_t y = rows[0]-rows[0]; #pragma unroll for (int f = (j-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (j-(filterLen-1)+f)>>1; if ((ind >= 0) && (ind < dy)) { y += rows[ti + ind*K] * lod[filterLen-1-f]; y += rows[ti + (ind+dy)*K] * hid[filterLen-1-f]; } } out[i + (j-yOffset)*dx + k*dx*dyNext] = y; } } // ############################################################################ // CUDA function of iwt column convolution // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: 2*dx x K // Output: out // Input: Lx, Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_col(_data_t *out, _data_t *Lx, _data_t *Hx, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t cols []; int ti = threadIdx.x; int tj = threadIdx.y; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; if (j>=dyNext){ return; } int dx2 = 2*dx; // Load Input to Temp Array for (int i = ti; i < dx; i += blockDim.x){ cols[i + tj*dx2] = Lx[i + j*dx + k*dx*dyNext]; cols[dx+i + tj*dx2] = Hx[i + j*dx + k*dx*dyNext]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int i = ti+xOffset; i < dxNext+xOffset; i += blockDim.x){ _data_t y = cols[0]-cols[0]; #pragma unroll for (int f = (i-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (i-(filterLen-1)+f)>>1; if (ind >= 0 && ind < dx) { y += cols[ind + tj*dx2] * lod[filterLen-1-f]; y += cols[dx+ind + tj*dx2] * hid[filterLen-1-f]; } } out[(i-xOffset) + j*dxNext + k*dxNext*dyNext] = y; } } extern "C" __global__ void cu_iwt3df_LC1 (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t df1,df2,n; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HLL df1 = HxLyLz_df1[i+j*dx+k*dx*dy]; df2 = HxLyLz_df2[i+j*dx+k*dx*dy]; n = HxLyLz_n[i+j*dx+k*dx*dy]; HxLyLz_df2[i+j*dx+k*dx*dy] = df1; HxLyLz_n[i+j*dx+k*dx*dy] = df2; yGreatZero = j>0; zGreatZero = k>0; HxLyLz_df1[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - zGreatZero*0.25*df2; //LHL df1 = LxHyLz_df1[i+j*dx+k*dx*dy]; df2 = 
LxHyLz_df2[i+j*dx+k*dx*dy]; n = LxHyLz_n[i+j*dx+k*dx*dy]; LxHyLz_n[i+j*dx+k*dx*dy] = df2; xGreatZero = i>0; zGreatZero = k>0; LxHyLz_df2[i+j*dx+k*dx*dy] = n - xGreatZero*0.25*df1 - zGreatZero*0.25*df2; //LLH df1 = LxLyHz_df1[i+j*dx+k*dx*dy]; df2 = LxLyHz_df2[i+j*dx+k*dx*dy]; n = LxLyHz_n[i+j*dx+k*dx*dy]; LxLyHz_df1[i+j*dx+k*dx*dy] = df2; LxLyHz_df2[i+j*dx+k*dx*dy] = df1; yGreatZero = j>0; xGreatZero = i>0; LxLyHz_n[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - xGreatZero*0.25*df2; } extern "C" __global__ void cu_iwt3df_LC1_diff (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HLL if (j>0) y = HxLyLz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; if (k>0) z = HxLyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; HxLyLz_df1[i+j*dx+k*dx*dy] += 0.25*y + 0.25*z; //LHL if (i>0) x = LxHyLz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero; if (k>0) z = LxHyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; LxHyLz_df2[i+j*dx+k*dx*dy] += 0.25*x + 0.25*z; //LLH if (j>0) y = LxLyHz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; if (i>0) x = LxLyHz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero; LxLyHz_n[i+j*dx+k*dx*dy] += 0.25*y + 0.25*x; } extern "C" __global__ void cu_iwt3df_LC2 (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t df1,df2,n; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HHL df1 = HxHyLz_df1[i+j*dx+k*dx*dy]; df2 = HxHyLz_df2[i+j*dx+k*dx*dy]; n = HxHyLz_n[i+j*dx+k*dx*dy]; HxHyLz_n[i+j*dx+k*dx*dy] = df2; zGreatZero = k>0; HxHyLz_df1[i+j*dx+k*dx*dy] = df1+n-zGreatZero*0.125*df2; HxHyLz_df2[i+j*dx+k*dx*dy] = n-df1-zGreatZero*0.125*df2; //HLH df1 = HxLyHz_df1[i+j*dx+k*dx*dy]; df2 = HxLyHz_df2[i+j*dx+k*dx*dy]; n = HxLyHz_n[i+j*dx+k*dx*dy]; HxLyHz_df2[i+j*dx+k*dx*dy] = df2; yGreatZero = j>0; HxLyHz_n[i+j*dx+k*dx*dy] = df1+n-yGreatZero*0.125*df2; HxLyHz_df1[i+j*dx+k*dx*dy] = n-df1-yGreatZero*0.125*df2; //LHH df1 = LxHyHz_df1[i+j*dx+k*dx*dy]; df2 = LxHyHz_df2[i+j*dx+k*dx*dy]; n = LxHyHz_n[i+j*dx+k*dx*dy]; LxHyHz_df1[i+j*dx+k*dx*dy] = df2; xGreatZero = i>0; LxHyHz_df2[i+j*dx+k*dx*dy] = df1+n-xGreatZero*0.125*df2; LxHyHz_n[i+j*dx+k*dx*dy] = n-df1-xGreatZero*0.125*df2; } extern "C" __global__ void cu_iwt3df_LC2_diff (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HHL if (k>0) z = HxHyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; HxHyLz_df1[i+j*dx+k*dx*dy] += 0.125*z; HxHyLz_df2[i+j*dx+k*dx*dy] += 0.125*z; //HLH if (j>0) y = HxLyHz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; HxLyHz_df1[i+j*dx+k*dx*dy] += 0.125*y; HxLyHz_n[i+j*dx+k*dx*dy] += 0.125*y; //LHH if (i>0) x = 
LxHyHz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero;
	LxHyHz_df2[i+j*dx+k*dx*dy] += 0.125*x;
	LxHyHz_n[i+j*dx+k*dx*dy] += 0.125*x;
}

extern "C" __global__ void cu_iwt3df_LC3 (_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz)
{
	int i = blockIdx.x*blockDim.x+threadIdx.x;
	int j = blockIdx.y*blockDim.y+threadIdx.y;
	int k = blockIdx.z*blockDim.z+threadIdx.z;
	_data_t df1,df2,n;
	if ((i>=dx)||(j>=dy)||(k>=dz)) {
		return;
	}
	//HHH
	df1 = HxHyHz_df1[i+j*dx+k*dx*dy];
	df2 = HxHyHz_df2[i+j*dx+k*dx*dy];
	n = HxHyHz_n[i+j*dx+k*dx*dy];
	HxHyHz_df1[i+j*dx+k*dx*dy] = n-df1;
	HxHyHz_df2[i+j*dx+k*dx*dy] = df2+n;
	HxHyHz_n[i+j*dx+k*dx*dy] = df1-df2+n;
}

// Scale a contiguous block of maxInd coefficients in place; valid indices are 0..maxInd-1.
extern "C" __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd)
{
	int ind = blockIdx.x*blockDim.x+threadIdx.x;
	if (ind >= maxInd) {
		return;
	}
	in[ind] = in[ind]*mult;
}

extern "C" __global__ void cu_add_mult(_data_t* out, _data_t* in, _data_t mult, int maxInd)
{
	int ind = blockIdx.x*blockDim.x+threadIdx.x;
	if (ind >= maxInd) {
		return;
	}
	_data_t i = out[ind];
	out[ind] = i+(out[ind]-i)*mult;
}

// Complex soft thresholding: shrink each coefficient's magnitude by thresh, zero it below thresh.
__global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax)
{
	int const i = threadIdx.x + blockDim.x*blockIdx.x;
	if (i>=numMax)
		return;
	scalar_t norm = abs(in[i]);
	scalar_t red = norm - thresh;
	in[i] = (red > 0.f) ? ((red / norm) * (in[i])) : in[i]-in[i];
}

__global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3)
{
	int index = blockIdx.x*blockDim.x + threadIdx.x;
	if (index >= dx*dy*dz) {
		return;
	}
	int indexShifted = (index+shift1+shift2*dx+shift3*dx*dy)%(dx*dy*dz);
	data[indexShifted] = dataCopy[index];
}

__global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3)
{
	int index = blockIdx.x*blockDim.x + threadIdx.x;
	if (index >= dx*dy*dz) {
		return;
	}
	int indexShifted = (index+shift1+shift2*dx+shift3*dx*dy)%(dx*dy*dz);
	data[index] = dataCopy[indexShifted];
}
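
/* Index sketch for the two shift kernels above (illustrative values, not from the
 * original source): with dx = 4, dy = 4, dz = 1 and randShift = (1,2,0), the voxel
 * at linear index 0 maps to
 *     indexShifted = (0 + 1 + 2*4 + 0*16) % 16 = 9,
 * i.e. (x,y) = (1,2). cu_circshift writes data[9] = dataCopy[0], while
 * cu_circunshift reads data[0] = dataCopy[9], so applying one after the other with
 * the same shifts restores the original ordering. */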
293cb573f9ad87d6c332382a79351c85b77de0c7.cu
/* * Copyright 2013-2015 The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013 Frank Ong, Martin Uecker, Pat Virtue, and Mark Murphy * [email protected] */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <assert.h> #include <cuda.h> #include "num/multind.h" #include "dfwavelet_kernels.h" #include "dfwavelet_impl.h" # define _hdev_ __host__ __device__ // _data_t is the interal representation of data_t in CUDA // Must be float2/double2 for data_t=Complex float/double or float/double for data_t=float/double typedef float2 _data_t; // Float2 Operators inline _hdev_ float2 operator+ (float2 z1, float2 z2) { return make_float2 (z1.x + z2.x, z1.y + z2.y); } inline _hdev_ float2 operator- (float2 z1, float2 z2) { return make_float2 (z1.x - z2.x, z1.y - z2.y); } inline _hdev_ float2 operator* (float2 z1, float2 z2) { return make_float2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x); } inline _hdev_ float2 operator* (float2 z1, float alpha) { return make_float2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ float2 operator* (float alpha,float2 z1) { return make_float2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ float2 operator/ (float alpha,float2 z1) { return make_float2 (1.f/z1.x, 1.f/z1.y); } inline _hdev_ void operator+= (float2 &z1, float2 z2) { z1.x += z2.x; z1.y += z2.y; } inline _hdev_ float abs(float2 z1) { return sqrt(z1.x*z1.x + z1.y*z1.y); } // Double2 Operators inline _hdev_ double2 operator+ (double2 z1, double2 z2) { return make_double2 (z1.x + z2.x, z1.y + z2.y); } inline _hdev_ double2 operator- (double2 z1, double2 z2) { return make_double2 (z1.x - z2.x, z1.y - z2.y); } inline _hdev_ double2 operator* (double2 z1, double2 z2) { return make_double2 (z1.x*z2.x - z1.y*z2.y, z1.x*z2.y + z1.y*z2.x); } inline _hdev_ double2 operator* (double2 z1, double alpha) { return make_double2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ double2 operator* (double alpha,double2 z1) { return make_double2 (z1.x*alpha, z1.y*alpha); } inline _hdev_ double2 operator/ (double alpha,double2 z1) { return make_double2 (1.f/z1.x, 1.f/z1.y); } inline _hdev_ void operator+= (double2 &z1, double2 z2) { z1.x += z2.x; z1.y += z2.y; } inline _hdev_ double abs(double2 z1) { return sqrt(z1.x*z1.x + z1.y*z1.y); } /********** Macros ************/ #define cuda(Call) do { \ cudaError_t err = cuda ## Call ; \ if (err != cudaSuccess){ \ fprintf(stderr, "%s\n", cudaGetErrorString(err)); \ throw; \ } \ } while(0) #define cuda_sync() do{ \ cuda (ThreadSynchronize()); \ cuda (GetLastError()); \ } while(0) /********** Macros ************/ #define cuda(Call) do { \ cudaError_t err = cuda ## Call ; \ if (err != cudaSuccess){ \ fprintf(stderr, "%s\n", cudaGetErrorString(err)); \ throw; \ } \ } while(0) #define cuda_sync() do{ \ cuda (ThreadSynchronize()); \ cuda (GetLastError()); \ } while(0) // ############################################################################ // Headers // ############################################################################ static __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t 
*Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_dep(_data_t *out,_data_t *Lz,_data_t *Hz,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_row(_data_t *out,_data_t *Ly,_data_t *Hy,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_iwt3df_col(_data_t *out,_data_t *Lx,_data_t *Hx,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,int xOffset,int yOffset,int zOffset,scalar_t *lod,scalar_t *hid,int filterLen); static __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext); static __global__ void cu_iwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz); static __global__ void cu_iwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz); static __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd); static __global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax); static __global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3); static __global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz, int shift1, int shift2, int shift3); extern "C" void dffwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz) { assert(plan->use_gpu==2); 
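	/* Host-side wrapper pattern shared by this and the other *_gpuHost entry points below:
	 * allocate device buffers, copy the inputs host-to-device, run the device-pointer
	 * version (here dffwt3_gpu), copy the results back, and free the temporaries. */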
data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,dev_vx,dev_vy,dev_vz); cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_wcn, dev_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dfiwt3_gpuHost(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn) { assert(plan->use_gpu==2); data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn,*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); cuda(Memcpy( dev_wcdf1, in_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_wcdf2, in_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_wcn, in_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); dfiwt3_gpu(plan,dev_vx,dev_vy,dev_vz,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dfsoftthresh_gpuHost(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn) { assert(plan->use_gpu==2); data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(data_t) )); cuda(Memcpy( dev_wcdf1, out_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_wcdf2, out_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_wcn, out_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyHostToDevice )); dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Memcpy( out_wcdf1, dev_wcdf1, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_wcdf2, dev_wcdf2, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_wcn, 
dev_wcn, plan->numCoeff*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); } extern "C" void dfwavthresh3_gpuHost(struct dfwavelet_plan_s* plan, scalar_t dfthresh,scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_vx,data_t* in_vy,data_t* in_vz) { assert(plan->use_gpu==2); data_t*dev_vx,*dev_vy,*dev_vz; cuda(Malloc( (void**)&dev_vx, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vy, plan->numPixel*sizeof(data_t) )); cuda(Malloc( (void**)&dev_vz, plan->numPixel*sizeof(data_t) )); cuda(Memcpy( dev_vx, in_vx, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_vy, in_vy, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_vz, in_vz, plan->numPixel*sizeof(data_t), cudaMemcpyHostToDevice )); dfwavthresh3_gpu(plan,dfthresh,nthresh,dev_vx,dev_vy,dev_vz,dev_vx,dev_vy,dev_vz); cuda(Memcpy( out_vx, dev_vx, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_vy, dev_vy, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Memcpy( out_vz, dev_vz, plan->numPixel*sizeof(data_t), cudaMemcpyDeviceToHost )); cuda(Free( dev_vx )); cuda(Free( dev_vy )); cuda(Free( dev_vz )); } extern "C" void dffwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz) { circshift_gpu(plan,in_vx); circshift_gpu(plan,in_vy); circshift_gpu(plan,in_vz); long numCoeff, filterLen,*waveSizes; numCoeff = plan->numCoeff; waveSizes = plan->waveSizes; filterLen = plan->filterLen; int numLevels = plan->numLevels; // Cast from generic data_t to device compatible _data_t _data_t* dev_wcdf1 = (_data_t*) out_wcdf1; _data_t* dev_wcdf2 = (_data_t*) out_wcdf2; _data_t* dev_wcn = (_data_t*) out_wcn; _data_t* dev_in_vx = (_data_t*) in_vx; _data_t* dev_in_vy = (_data_t*) in_vy; _data_t* dev_in_vz = (_data_t*) in_vz; _data_t* res = (_data_t*) plan->res; _data_t* dev_temp1,*dev_temp2; cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t) )); // Get dimensions int dx = plan->imSize[0]; int dy = plan->imSize[1]; int dz = plan->imSize[2]; int dxNext = waveSizes[0 + 3*numLevels]; int dyNext = waveSizes[1 + 3*numLevels]; int dzNext = waveSizes[2 + 3*numLevels]; int blockSize = dxNext*dyNext*dzNext; // allocate device memory and copy filters to device scalar_t *dev_filters; cuda(Malloc( (void**)&dev_filters, 4*plan->filterLen*sizeof(scalar_t) )); scalar_t *dev_lod0 = dev_filters + 0*plan->filterLen; scalar_t *dev_hid0 = dev_filters + 1*plan->filterLen; scalar_t *dev_lod1 = dev_filters + 2*plan->filterLen; scalar_t *dev_hid1 = dev_filters + 3*plan->filterLen; cuda(Memcpy( dev_lod0, plan->lod0, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_lod1, plan->lod1, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice )); // Initialize variables and Pointers for FWT int const SHMEM_SIZE = 16384; int const T = 512; int mem, K; dim3 numBlocks, numThreads; // Temp Pointers _data_t *dev_tempLx,*dev_tempHx; dev_tempLx = dev_temp1; dev_tempHx = dev_tempLx + numCoeff/2; _data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy; dev_tempLxLy = dev_temp2; dev_tempHxLy = dev_tempLxLy + numCoeff/4; dev_tempLxHy = dev_tempHxLy + numCoeff/4; dev_tempHxHy = dev_tempLxHy + numCoeff/4; // wcdf1 Pointers _data_t 
*dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx; dev_LxLyLz_df1 = dev_wcdf1; dev_HxLyLz_df1 = dev_LxLyLz_df1 + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_df1 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vx = dev_in_vx; // wcdf2 Pointers _data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy; dev_LxLyLz_df2 = dev_wcdf2; dev_HxLyLz_df2 = dev_LxLyLz_df2 + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_df2 += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vy = dev_in_vy; // wcn Pointers _data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz; dev_LxLyLz_n = dev_wcn; dev_HxLyLz_n = dev_LxLyLz_n + waveSizes[0]*waveSizes[1]*waveSizes[2]; for (int l = 1; l <= numLevels; ++l){ dev_HxLyLz_n += 7*waveSizes[0 + 3*l]*waveSizes[1 + 3*l]*waveSizes[2 + 3*l]; } dev_current_vz = dev_in_vz; //*****************Loop through levels**************** for (int l = numLevels; l >= 1; --l) { dxNext = waveSizes[0 + 3*l]; dyNext = waveSizes[1 + 3*l]; dzNext = waveSizes[2 + 3*l]; blockSize = dxNext*dyNext*dzNext; // Update Pointers // df1 dev_HxLyLz_df1 = dev_HxLyLz_df1 - 7*blockSize; dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize; dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize; dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize; dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize; dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize; dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize; // df2 dev_HxLyLz_df2 = dev_HxLyLz_df2 - 7*blockSize; dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize; dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize; dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize; dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize; dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize; dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize; // n dev_HxLyLz_n = dev_HxLyLz_n - 7*blockSize; dev_LxHyLz_n = dev_HxLyLz_n + blockSize; dev_HxHyLz_n = dev_LxHyLz_n + blockSize; dev_LxLyHz_n = dev_HxHyLz_n + blockSize; dev_HxLyHz_n = dev_LxLyHz_n + blockSize; dev_LxHyHz_n = dev_HxLyHz_n + blockSize; dev_HxHyHz_n = dev_LxHyHz_n + blockSize; //************WCVX*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); cu_fwt3df_col <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempHx,dev_current_vx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_LxLyLz_df1,dev_LxLyHz_df1,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem 
>>>(dev_LxHyLz_df1,dev_LxHyHz_df1,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxLyLz_df1,dev_HxLyHz_df1,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxHyLz_df1,dev_HxHyHz_df1,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); //************WCVY*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); cu_fwt3df_col <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempHx,dev_current_vy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_LxLyLz_df2,dev_LxLyHz_df2,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_LxHyLz_df2,dev_LxHyHz_df2,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxLyLz_df2,dev_HxLyHz_df2,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxHyLz_df2,dev_HxHyHz_df2,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); //************WCVZ*********** // FWT Columns K = (SHMEM_SIZE-16)/(dx*sizeof(_data_t)); numBlocks = dim3(1,(dy+K-1)/K,dz); numThreads = dim3(T/K,K,1); mem = K*dx*sizeof(_data_t); cu_fwt3df_col <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempHx,dev_current_vz,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Rows K = (SHMEM_SIZE-16)/(dy*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,1,dz); numThreads = dim3(K,T/K,1); mem = K*dy*sizeof(_data_t); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_tempLxHy,dev_tempLx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cu_fwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_tempHxHy,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod0,dev_hid0,filterLen); cuda_sync(); // FWT Depths K = (SHMEM_SIZE-16)/(dz*sizeof(_data_t)); numBlocks = dim3(((dxNext)+K-1)/K,dyNext,1); numThreads = dim3(K,1,T/K); mem = K*dz*sizeof(_data_t); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_LxLyLz_n,dev_LxLyHz_n,dev_tempLxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_LxHyLz_n,dev_LxHyHz_n,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxLyLz_n,dev_HxLyHz_n,dev_tempHxLy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cu_fwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_HxHyLz_n,dev_HxHyHz_n,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,dev_lod1,dev_hid1,filterLen); cuda_sync(); //******* Multi ****** int maxInd = 
7*blockSize; numThreads = T; numBlocks = (maxInd+numThreads.x-1)/numThreads.x; cu_mult <<< numBlocks, numThreads >>> (dev_HxLyLz_df1,1.f/res[0],maxInd); cu_mult <<< numBlocks, numThreads >>> (dev_HxLyLz_df2,1.f/res[1],maxInd); cu_mult <<< numBlocks, numThreads >>> (dev_HxLyLz_n,1.f/res[2],maxInd); cuda_sync(); //*******Linear Combination****** int t1 = min(dxNext,T); int t2 = T/t1; numBlocks = dim3( (dxNext+t1-1)/t1, (dyNext+t2-1)/t2, dzNext); numThreads = dim3(t1,t2,1); cu_fwt3df_LC1 <<< numBlocks,numThreads >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext); cu_fwt3df_LC2 <<< numBlocks,numThreads >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext); cu_fwt3df_LC3 <<< numBlocks,numThreads >>> (dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dxNext,dyNext,dzNext); cuda_sync(); cu_fwt3df_LC1_diff <<< numBlocks,numThreads >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dxNext,dyNext,dzNext); cu_fwt3df_LC2_diff <<< numBlocks,numThreads >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dxNext,dyNext,dzNext); cuda_sync(); dev_current_vx = dev_wcdf1; dev_current_vy = dev_wcdf2; dev_current_vz = dev_wcn; dx = dxNext; dy = dyNext; dz = dzNext; } cuda(Free( dev_filters )); cuda(Free( dev_temp1 )); cuda(Free( dev_temp2 )); circunshift_gpu(plan,in_vx); circunshift_gpu(plan,in_vy); circunshift_gpu(plan,in_vz); } extern "C" void dfiwt3_gpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn) { long numCoeff, filterLen,*waveSizes; numCoeff = plan->numCoeff; waveSizes = plan->waveSizes; filterLen = plan->filterLen; int numLevels = plan->numLevels; // Cast from generic data_t to device compatible _data_t _data_t* dev_out_vx = (_data_t*)out_vx; _data_t* dev_out_vy = (_data_t*)out_vy; _data_t* dev_out_vz = (_data_t*)out_vz; _data_t* dev_wcdf1 = (_data_t*)in_wcdf1; _data_t* dev_wcdf2 = (_data_t*)in_wcdf2; _data_t* dev_wcn = (_data_t*)in_wcn; _data_t* res = (_data_t*) plan->res; _data_t* dev_temp1, *dev_temp2; cuda(Malloc( (void**)&dev_temp1, numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_temp2, numCoeff*sizeof(_data_t)) ); // allocate device memory scalar_t *dev_filters; cuda(Malloc( (void**)&dev_filters, 4*(plan->filterLen)*sizeof(scalar_t) )); scalar_t *dev_lor0 = dev_filters + 0*plan->filterLen; scalar_t *dev_hir0 = dev_filters + 1*plan->filterLen; scalar_t *dev_lor1 = dev_filters + 2*plan->filterLen; scalar_t *dev_hir1 = dev_filters + 3*plan->filterLen; cuda(Memcpy( dev_lor0, plan->lor0, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice )); cuda(Memcpy( dev_lor1, plan->lor1, 2*plan->filterLen*sizeof(scalar_t), cudaMemcpyHostToDevice )); // Workspace dimensions int dxWork = waveSizes[0 + 3*numLevels]*2-1 + filterLen-1; int dyWork = waveSizes[1 + 3*numLevels]*2-1 + filterLen-1; int dzWork = waveSizes[2 + 3*numLevels]*2-1 + filterLen-1; // Initialize variables and pointers for IWT int const SHMEM_SIZE = 16384; int const T = 512; int mem,K; dim3 numBlocks, numThreads; int dx = waveSizes[0]; int dy = waveSizes[1]; int dz = waveSizes[2]; // Temp Pointers _data_t *dev_tempLxLy,*dev_tempHxLy,*dev_tempLxHy,*dev_tempHxHy; dev_tempLxLy = dev_temp1; 
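	/* Workspace layout for the inverse passes: dev_temp1 is split into four numCoeff/4
	 * quarters (LxLy, HxLy, LxHy, HxHy) holding the depth-pass outputs, and dev_temp2
	 * into two numCoeff/2 halves (Lx, Hx) holding the row-pass outputs. */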
dev_tempHxLy = dev_tempLxLy + numCoeff/4; dev_tempLxHy = dev_tempHxLy + numCoeff/4; dev_tempHxHy = dev_tempLxHy + numCoeff/4; _data_t *dev_tempLx,*dev_tempHx; dev_tempLx = dev_temp2; dev_tempHx = dev_tempLx + numCoeff/2; // wcdf1 Pointers _data_t *dev_LxLyLz_df1,*dev_HxLyLz_df1,*dev_LxHyLz_df1,*dev_HxHyLz_df1,*dev_LxLyHz_df1,*dev_HxLyHz_df1,*dev_LxHyHz_df1,*dev_HxHyHz_df1,*dev_current_vx; dev_LxLyLz_df1 = dev_wcdf1; dev_HxLyLz_df1 = dev_LxLyLz_df1 + dx*dy*dz; dev_current_vx = dev_LxLyLz_df1; // wcdf2 Pointers _data_t *dev_LxLyLz_df2,*dev_HxLyLz_df2,*dev_LxHyLz_df2,*dev_HxHyLz_df2,*dev_LxLyHz_df2,*dev_HxLyHz_df2,*dev_LxHyHz_df2,*dev_HxHyHz_df2,*dev_current_vy; dev_LxLyLz_df2 = dev_wcdf2; dev_HxLyLz_df2 = dev_LxLyLz_df2 + dx*dy*dz; dev_current_vy = dev_LxLyLz_df2; // wcn Pointers _data_t *dev_LxLyLz_n,*dev_HxLyLz_n,*dev_LxHyLz_n,*dev_HxHyLz_n,*dev_LxLyHz_n,*dev_HxLyHz_n,*dev_LxHyHz_n,*dev_HxHyHz_n,*dev_current_vz; dev_LxLyLz_n = dev_wcn; dev_HxLyLz_n = dev_LxLyLz_n + dx*dy*dz; dev_current_vz = dev_LxLyLz_n; for (int level = 1; level < numLevels+1; ++level) { dx = waveSizes[0 + 3*level]; dy = waveSizes[1 + 3*level]; dz = waveSizes[2 + 3*level]; int blockSize = dx*dy*dz; int dxNext = waveSizes[0+3*(level+1)]; int dyNext = waveSizes[1+3*(level+1)]; int dzNext = waveSizes[2+3*(level+1)]; // Calclate Offset dxWork = (2*dx-1 + filterLen-1); dyWork = (2*dy-1 + filterLen-1); dzWork = (2*dz-1 + filterLen-1); int xOffset = (int) floor((dxWork - dxNext) / 2.0); int yOffset = (int) floor((dyWork - dyNext) / 2.0); int zOffset = (int) floor((dzWork - dzNext) / 2.0); // Update Pointers // df1 dev_LxHyLz_df1 = dev_HxLyLz_df1 + blockSize; dev_HxHyLz_df1 = dev_LxHyLz_df1 + blockSize; dev_LxLyHz_df1 = dev_HxHyLz_df1 + blockSize; dev_HxLyHz_df1 = dev_LxLyHz_df1 + blockSize; dev_LxHyHz_df1 = dev_HxLyHz_df1 + blockSize; dev_HxHyHz_df1 = dev_LxHyHz_df1 + blockSize; // df2 dev_LxHyLz_df2 = dev_HxLyLz_df2 + blockSize; dev_HxHyLz_df2 = dev_LxHyLz_df2 + blockSize; dev_LxLyHz_df2 = dev_HxHyLz_df2 + blockSize; dev_HxLyHz_df2 = dev_LxLyHz_df2 + blockSize; dev_LxHyHz_df2 = dev_HxLyHz_df2 + blockSize; dev_HxHyHz_df2 = dev_LxHyHz_df2 + blockSize; // n dev_LxHyLz_n = dev_HxLyLz_n + blockSize; dev_HxHyLz_n = dev_LxHyLz_n + blockSize; dev_LxLyHz_n = dev_HxHyLz_n + blockSize; dev_HxLyHz_n = dev_LxLyHz_n + blockSize; dev_LxHyHz_n = dev_HxLyHz_n + blockSize; dev_HxHyHz_n = dev_LxHyHz_n + blockSize; //*******Linear Combination****** int t1 = min(dxNext,T); int t2 = T/t1; numBlocks = dim3( (dx+t1-1)/t1, (dy+t2-1)/t2, dz); numThreads = dim3(t1,t2,1); cu_iwt3df_LC1 <<< numBlocks,numThreads >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz); cu_iwt3df_LC2 <<< numBlocks,numThreads >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz); cu_iwt3df_LC3 <<< numBlocks,numThreads >>> (dev_HxHyHz_df1,dev_HxHyHz_df2,dev_HxHyHz_n,dx,dy,dz); cuda_sync(); cu_iwt3df_LC1_diff <<< numBlocks,numThreads >>> (dev_HxLyLz_df1,dev_HxLyLz_df2,dev_HxLyLz_n,dev_LxHyLz_df1,dev_LxHyLz_df2,dev_LxHyLz_n,dev_LxLyHz_df1,dev_LxLyHz_df2,dev_LxLyHz_n,dx,dy,dz); cu_iwt3df_LC2_diff <<< numBlocks,numThreads >>> (dev_HxHyLz_df1,dev_HxHyLz_df2,dev_HxHyLz_n,dev_HxLyHz_df1,dev_HxLyHz_df2,dev_HxLyHz_n,dev_LxHyHz_df1,dev_LxHyHz_df2,dev_LxHyHz_n,dx,dy,dz); cuda_sync(); //******* Multi ****** int maxInd = 7*blockSize; numThreads = T; numBlocks = (maxInd+numThreads.x-1)/numThreads.x; cu_mult <<< 
numBlocks, numThreads >>> (dev_HxLyLz_df1,res[0],maxInd); cu_mult <<< numBlocks, numThreads >>> (dev_HxLyLz_df2,res[1],maxInd); cu_mult <<< numBlocks, numThreads >>> (dev_HxLyLz_n,res[2],maxInd); cuda_sync(); //************WCX************ // Update Pointers if (level==numLevels) dev_current_vx = dev_out_vx; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_LxLyLz_df1,dev_LxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_HxLyLz_df1,dev_HxLyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxHy,dev_LxHyLz_df1,dev_LxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxHy,dev_HxHyLz_df1,dev_HxHyHz_df1,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); // IWT Columns K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); cu_iwt3df_col <<< numBlocks,numThreads,mem >>>(dev_current_vx,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); cuda_sync(); //************WCY************ // Update Pointers if (level==numLevels) dev_current_vy = dev_out_vy; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_LxLyLz_df2,dev_LxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_HxLyLz_df2,dev_HxLyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxHy,dev_LxHyLz_df2,dev_LxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxHy,dev_HxHyLz_df2,dev_HxHyHz_df2,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,plan->filterLen); cuda_sync(); // IWT Columns K = 
(SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); cu_iwt3df_col <<< numBlocks,numThreads,mem >>>(dev_current_vy,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); //************WCZ************ // Update Pointers if (level==numLevels) dev_current_vz = dev_out_vz; // IWT Depths K = (SHMEM_SIZE-16)/(2*dz*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,dy,1); numThreads = dim3(K,1,(T/K)); mem = K*2*dz*sizeof(_data_t); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxLy,dev_LxLyLz_n,dev_LxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxLy,dev_HxLyLz_n,dev_HxLyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempLxHy,dev_LxHyLz_n,dev_LxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); cu_iwt3df_dep <<< numBlocks,numThreads,mem >>>(dev_tempHxHy,dev_HxHyLz_n,dev_HxHyHz_n,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor1,dev_hir1,filterLen); cuda_sync(); // IWT Rows K = (SHMEM_SIZE-16)/(2*dy*sizeof(_data_t)); numBlocks = dim3((dx+K-1)/K,1,dzNext); numThreads = dim3(K,(T/K),1); mem = K*2*dy*sizeof(_data_t); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempLx,dev_tempLxLy,dev_tempLxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cu_iwt3df_row <<< numBlocks,numThreads,mem >>>(dev_tempHx,dev_tempHxLy,dev_tempHxHy,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); // IWT Columns K = (SHMEM_SIZE-16)/(2*dx*sizeof(_data_t)); numBlocks = dim3(1,(dyNext+K-1)/K,dzNext); numThreads = dim3((T/K),K,1); mem = K*2*dx*sizeof(_data_t); cu_iwt3df_col <<< numBlocks,numThreads,mem >>>(dev_current_vz,dev_tempLx,dev_tempHx,dx,dy,dz,dxNext,dyNext,dzNext,xOffset,yOffset,zOffset,dev_lor0,dev_hir0,plan->filterLen); cuda_sync(); dev_HxLyLz_df1 += 7*blockSize; dev_HxLyLz_df2 += 7*blockSize; dev_HxLyLz_n += 7*blockSize; } cuda(Free( dev_filters )); cuda(Free( dev_temp1 )); cuda(Free( dev_temp2 )); circunshift_gpu(plan,out_vx); circunshift_gpu(plan,out_vy); circunshift_gpu(plan,out_vz); } int rand_lim(int limit) { int divisor = RAND_MAX/(limit+1); int retval; do { retval = rand() / divisor; } while (retval > limit); return retval; } void dfwavelet_new_randshift_gpu (struct dfwavelet_plan_s* plan) { int i; i = rand(); for(i = 0; i < plan->numdims; i++) { // Determine maximum shift value for this dimension int log2dim = 1; while( (1<<log2dim) < plan->imSize[i]) { log2dim++; } int maxShift = 1 << (log2dim-plan->numLevels); if (maxShift > 8) { maxShift = 8; } // Generate random shift value between 0 and maxShift plan->randShift[i] = rand_lim(maxShift); } } extern "C" void dfwavthresh3_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz) { data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; cuda(Malloc( (void**)&dev_wcdf1, plan->numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_wcdf2, plan->numCoeff*sizeof(_data_t) )); cuda(Malloc( (void**)&dev_wcn, plan->numCoeff*sizeof(_data_t) )); dffwt3_gpu(plan,dev_wcdf1,dev_wcdf2,dev_wcn,in_vx,in_vy,in_vz); dfsoftthresh_gpu(plan,dfthresh,nthresh,dev_wcdf1,dev_wcdf2,dev_wcn); 
dfiwt3_gpu(plan,out_vx,out_vy,out_vz,dev_wcdf1,dev_wcdf2,dev_wcn); cuda(Free( dev_wcdf1 )); cuda(Free( dev_wcdf2 )); cuda(Free( dev_wcn )); } extern "C" void dfsoftthresh_gpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn) { assert(plan->use_gpu==1||plan->use_gpu==2); _data_t* dev_wcdf1,*dev_wcdf2,*dev_wcn; dev_wcdf1 = (_data_t*) out_wcdf1; dev_wcdf2 = (_data_t*) out_wcdf2; dev_wcn = (_data_t*) out_wcn; int numMax; int const T = 512; dim3 numBlocks, numThreads; numMax = plan->numCoeff-plan->numCoarse; numBlocks = dim3((numMax+T-1)/T,1,1); numThreads = dim3(T,1,1); cu_soft_thresh <<< numBlocks,numThreads>>> (dev_wcdf1+plan->numCoarse,dfthresh,numMax); cu_soft_thresh <<< numBlocks,numThreads>>> (dev_wcdf2+plan->numCoarse,dfthresh,numMax); cu_soft_thresh <<< numBlocks,numThreads>>> (dev_wcn+plan->numCoarse,nthresh,numMax); } /********** Aux functions **********/ extern "C" void circshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } _data_t* data = (_data_t*) data_c; // Copy data _data_t* dataCopy; cuda(Malloc((void**)&dataCopy, plan->numPixel*sizeof(_data_t))); cuda(Memcpy(dataCopy, data, plan->numPixel*sizeof(_data_t), cudaMemcpyDeviceToDevice)); int T = 512; if (plan->numdims==2) { int dx,dy,r0,r1; dx = plan->imSize[0]; dy = plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; cu_circshift <<< (plan->numPixel+T-1)/T, T>>>(data,dataCopy,dx,dy,1,r0,r1,0); } else if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; cu_circshift <<< (plan->numPixel+T-1)/T, T>>>(data,dataCopy,dx,dy,dz,r0,r1,r2); } cuda(Free(dataCopy)); } extern "C" void circunshift_gpu(struct dfwavelet_plan_s* plan, data_t* data_c) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } _data_t* data = (_data_t*) data_c; // Copy data _data_t* dataCopy; cuda(Malloc((void**)&dataCopy, plan->numPixel*sizeof(_data_t))); cuda(Memcpy(dataCopy, data, plan->numPixel*sizeof(_data_t), cudaMemcpyDeviceToDevice)); int T = 512; if (plan->numdims==2) { int dx,dy,r0,r1; dx = plan->imSize[0]; dy = plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; cu_circunshift <<< (plan->numPixel+T-1)/T, T>>>(data,dataCopy,dx,dy,1,r0,r1,0); } else if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; cu_circunshift <<< (plan->numPixel+T-1)/T, T>>>(data,dataCopy,dx,dy,dz,r0,r1,r2); } cuda(Free(dataCopy)); } // ############################################################################ // CUDA function of fwt column convolution // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: Lx, Hx // Input: in, dx, dy, dz, dxNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_col(_data_t *Lx,_data_t *Hx,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t cols []; int ti = threadIdx.x; int tj = threadIdx.y; int j = 
blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; if (j>=dy) { return; } // Load Input to Temp Array for (int i = ti; i < dx; i += blockDim.x){ cols[i + tj*dx] = in[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High-Pass Downsample int ind, lessThan, greaThan; for (int i = ti; i < dxNext; i += blockDim.x){ _data_t y = cols[0]-cols[0]; _data_t z = cols[0]-cols[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*i+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dx); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dx-1)*greaThan+ind*(-2*greaThan+1); y += cols[ind + tj*dx] * lod[filterLen-1-f]; z += cols[ind + tj*dx] * hid[filterLen-1-f]; } Lx[i + j*dxNext + k*dxNext*dy] = y; Hx[i + j*dxNext + k*dxNext*dy] = z; } } // ############################################################################ // CUDA function of fwt row convolution. Assumes fwt_col() has already been called // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: LxLy, LxHy / HxLy, HxHy // Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_row(_data_t *Ly,_data_t *Hy,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t rows []; int const K = blockDim.x; int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x*blockDim.x+threadIdx.x; int k = blockIdx.z*blockDim.z+threadIdx.z; if (i>=dxNext) { return; } for (int j = tj; j < dy; j += blockDim.y){ rows[ti + j*K] = in[i + j*dxNext + k*dxNext*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind, lessThan, greaThan; for (int j = tj; j < dyNext; j += blockDim.y){ _data_t y = rows[0]-rows[0]; _data_t z = rows[0]-rows[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*j+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dy); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dy-1)*greaThan+ind*(-2*greaThan+1); y += rows[ti + ind*K] * lod[filterLen-1-f]; z += rows[ti + ind*K] * hid[filterLen-1-f]; } Ly[i + j*dxNext + k*dxNext*dyNext] = y; Hy[i + j*dxNext + k*dxNext*dyNext] = z; } } // ############################################################################ // CUDA function of fwt depth convolution. 
Assumes fwt_row() has already been called // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Output: LxLy, LxHy / HxLy, HxHy // Input: Lx/Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_fwt3df_dep(_data_t *Lz,_data_t *Hz,_data_t *in,int dx,int dy,int dz,int dxNext,int dyNext,int dzNext,scalar_t *lod,scalar_t *hid,int filterLen) { extern __shared__ _data_t deps []; int const K = blockDim.x; int ti = threadIdx.x; int tk = threadIdx.z; int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if (i>=dxNext) { return; } for (int k = tk; k < dz; k += blockDim.z){ deps[ti + k*K] = in[i + j*dxNext + k*dxNext*dyNext]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind, lessThan, greaThan; for (int k = tk; k < dzNext; k += blockDim.z){ _data_t y = deps[0]-deps[0]; _data_t z = deps[0]-deps[0]; #pragma unroll for (int f = 0; f < filterLen; f++){ ind = 2*k+1 - (filterLen-1)+f; lessThan = (int) (ind<0); greaThan = (int) (ind>=dz); ind = -1*lessThan+ind*(-2*lessThan+1); ind = (2*dz-1)*greaThan+ind*(-2*greaThan+1); y += deps[ti + ind*K] * lod[filterLen-1-f]; z += deps[ti + ind*K] * hid[filterLen-1-f]; } Lz[i + j*dxNext + k*dxNext*dyNext] = y; Hz[i + j*dxNext + k*dxNext*dyNext] = z; } } extern "C" __global__ void cu_fwt3df_LC1(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HLL x = HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxLyLz_n[i+j*dxNext+k*dxNext*dyNext]; HxLyLz_df1[i+j*dxNext+k*dxNext*dyNext] = y; HxLyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; yGreatZero = j>0; zGreatZero = k>0; HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] = x + yGreatZero*0.25f*y + zGreatZero*0.25f*z; //LHL x = LxHyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxHyLz_n[i+j*dxNext+k*dxNext*dyNext]; LxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; xGreatZero = i>0; zGreatZero = k>0; LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = y + xGreatZero*0.25f*x + zGreatZero*0.25f*z; //LLH x = LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxLyHz_n[i+j*dxNext+k*dxNext*dyNext]; LxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = y; LxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x; yGreatZero = j>0; xGreatZero = i>0; LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = z + yGreatZero*0.25*y + xGreatZero*0.25*x; } extern "C" __global__ void cu_fwt3df_LC1_diff(_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HLL if (j>0) y = HxLyLz_df1[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; if (k>0) z = HxLyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; HxLyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 
0.25*z; //LHL if (i>0) x = LxHyLz_df1[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; if (k>0) z = LxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; LxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*x - 0.25*z; //LLH if (j>0) y = LxLyHz_df1[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; if (i>0) x = LxLyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; LxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.25*y - 0.25*x; } extern "C" __global__ void cu_fwt3df_LC2(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHL x = HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxHyLz_n[i+j*dxNext+k*dxNext*dyNext]; HxHyLz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x-y); HxHyLz_df2[i+j*dxNext+k*dxNext*dyNext] = z; zGreatZero = k>0; HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(x+y) + zGreatZero*0.125*z; //HLH x = HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxLyHz_n[i+j*dxNext+k*dxNext*dyNext]; HxLyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z-x); HxLyHz_df2[i+j*dxNext+k*dxNext*dyNext] = y; yGreatZero = j>0; HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(z+x) + yGreatZero*0.125*y; //LHH x = LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = LxHyHz_n[i+j*dxNext+k*dxNext*dyNext]; LxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y-z); LxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = x; xGreatZero = i>0; LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 0.5*(y+z) + xGreatZero*0.125*x; } extern "C" __global__ void cu_fwt3df_LC2_diff(_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHL if (k>0) z = HxHyLz_df2[i+j*dxNext+(k-1)*dxNext*dyNext]; else z = zero; HxHyLz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*z; //HLH if (j>0) y = HxLyHz_df2[i+(j-1)*dxNext+k*dxNext*dyNext]; else y = zero; HxLyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*y; //LHH if (i>0) x = LxHyHz_df2[(i-1)+j*dxNext+k*dxNext*dyNext]; else x = zero; LxHyHz_n[i+j*dxNext+k*dxNext*dyNext] += -0.125*x; } extern "C" __global__ void cu_fwt3df_LC3(_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dxNext, int dyNext, int dzNext) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; if ((i>=dxNext)||(j>=dyNext)||(k>=dzNext)) { return; } //HHH x = HxHyHz_df1[i+j*dxNext+k*dxNext*dyNext]; y = HxHyHz_df2[i+j*dxNext+k*dxNext*dyNext]; z = HxHyHz_n[i+j*dxNext+k*dxNext*dyNext]; HxHyHz_df1[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(-2.0*x+y+z); HxHyHz_df2[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(2*y-x-z); HxHyHz_n[i+j*dxNext+k*dxNext*dyNext] = 1.0/3.0*(x+y+z); } // ############################################################################ // CUDA function of iwt depth convolution. 
// Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: K x 2*dy // Output: Lz/Hz // Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_dep(_data_t *out, _data_t *Lz, _data_t *Hz, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset,int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t deps []; int const K = blockDim.x; int ti = threadIdx.x; int tk = threadIdx.z; int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if (i>=dx){ return; } for (int k = tk; k < dz; k += blockDim.z){ deps[ti + k*K] = Lz[i + j*dx + k*dx*dy]; deps[ti + (k+dz)*K] = Hz[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int k = tk+zOffset; k < dzNext+zOffset; k += blockDim.z){ _data_t y = deps[0]-deps[0]; #pragma unroll for (int f = (k-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (k-(filterLen-1)+f)>>1; if ((ind >= 0) && (ind < dz)) { y += deps[ti + ind*K] * lod[filterLen-1-f]; y += deps[ti + (ind+dz)*K] * hid[filterLen-1-f]; } } out[i + j*dx + (k-zOffset)*dx*dy] = y; } } // ############################################################################ // CUDA function of iwt row convolution. Assumes fwt_col() has already been called. // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: K x 2*dy // Output: Lx/Hx // Input: LxLy,LxHy / HxLy, HxHy, dx, dy, dxNext, dyNext,xOffset, yOffset,lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_row(_data_t *out, _data_t *Ly, _data_t *Hy, int dx, int dy,int dz,int dxNext, int dyNext,int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t rows []; int const K = blockDim.x; int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x*blockDim.x+threadIdx.x; int k = blockIdx.z*blockDim.z+threadIdx.z; if (i>=dx){ return; } for (int j = tj; j < dy; j += blockDim.y){ rows[ti + j*K] = Ly[i + j*dx + k*dx*dy]; rows[ti + (j+dy)*K] = Hy[i + j*dx + k*dx*dy]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int j = tj+yOffset; j < dyNext+yOffset; j += blockDim.y){ _data_t y = rows[0]-rows[0]; #pragma unroll for (int f = (j-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (j-(filterLen-1)+f)>>1; if ((ind >= 0) && (ind < dy)) { y += rows[ti + ind*K] * lod[filterLen-1-f]; y += rows[ti + (ind+dy)*K] * hid[filterLen-1-f]; } } out[i + (j-yOffset)*dx + k*dx*dyNext] = y; } } // ############################################################################ // CUDA function of iwt column convolution // Loads data to scratchpad (shared memory) and convolve w/ low pass and high pass // Scratchpad size: 2*dx x K // Output: out // Input: Lx, Hx, dx, dy, dxNext, dyNext, lod, hid, filterLen // ############################################################################ extern "C" __global__ void cu_iwt3df_col(_data_t *out, _data_t *Lx, _data_t *Hx, int dx, int dy,int dz,int dxNext, int dyNext, int dzNext,int xOffset, int yOffset, int zOffset,scalar_t *lod, scalar_t *hid, int filterLen) { extern __shared__ _data_t cols []; int ti = threadIdx.x; int tj = threadIdx.y; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; if 
(j>=dyNext){ return; } int dx2 = 2*dx; // Load Input to Temp Array for (int i = ti; i < dx; i += blockDim.x){ cols[i + tj*dx2] = Lx[i + j*dx + k*dx*dyNext]; cols[dx+i + tj*dx2] = Hx[i + j*dx + k*dx*dyNext]; } __syncthreads(); // Low-Pass and High Pass Downsample int ind; for (int i = ti+xOffset; i < dxNext+xOffset; i += blockDim.x){ _data_t y = cols[0]-cols[0]; #pragma unroll for (int f = (i-(filterLen-1)) % 2; f < filterLen; f+=2){ ind = (i-(filterLen-1)+f)>>1; if (ind >= 0 && ind < dx) { y += cols[ind + tj*dx2] * lod[filterLen-1-f]; y += cols[dx+ind + tj*dx2] * hid[filterLen-1-f]; } } out[(i-xOffset) + j*dxNext + k*dxNext*dyNext] = y; } } extern "C" __global__ void cu_iwt3df_LC1 (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t df1,df2,n; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HLL df1 = HxLyLz_df1[i+j*dx+k*dx*dy]; df2 = HxLyLz_df2[i+j*dx+k*dx*dy]; n = HxLyLz_n[i+j*dx+k*dx*dy]; HxLyLz_df2[i+j*dx+k*dx*dy] = df1; HxLyLz_n[i+j*dx+k*dx*dy] = df2; yGreatZero = j>0; zGreatZero = k>0; HxLyLz_df1[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - zGreatZero*0.25*df2; //LHL df1 = LxHyLz_df1[i+j*dx+k*dx*dy]; df2 = LxHyLz_df2[i+j*dx+k*dx*dy]; n = LxHyLz_n[i+j*dx+k*dx*dy]; LxHyLz_n[i+j*dx+k*dx*dy] = df2; xGreatZero = i>0; zGreatZero = k>0; LxHyLz_df2[i+j*dx+k*dx*dy] = n - xGreatZero*0.25*df1 - zGreatZero*0.25*df2; //LLH df1 = LxLyHz_df1[i+j*dx+k*dx*dy]; df2 = LxLyHz_df2[i+j*dx+k*dx*dy]; n = LxLyHz_n[i+j*dx+k*dx*dy]; LxLyHz_df1[i+j*dx+k*dx*dy] = df2; LxLyHz_df2[i+j*dx+k*dx*dy] = df1; yGreatZero = j>0; xGreatZero = i>0; LxLyHz_n[i+j*dx+k*dx*dy] = n - yGreatZero*0.25*df1 - xGreatZero*0.25*df2; } extern "C" __global__ void cu_iwt3df_LC1_diff (_data_t *HxLyLz_df1,_data_t *HxLyLz_df2,_data_t *HxLyLz_n,_data_t *LxHyLz_df1,_data_t *LxHyLz_df2,_data_t *LxHyLz_n,_data_t *LxLyHz_df1,_data_t *LxLyHz_df2,_data_t *LxLyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HLL if (j>0) y = HxLyLz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; if (k>0) z = HxLyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; HxLyLz_df1[i+j*dx+k*dx*dy] += 0.25*y + 0.25*z; //LHL if (i>0) x = LxHyLz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero; if (k>0) z = LxHyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; LxHyLz_df2[i+j*dx+k*dx*dy] += 0.25*x + 0.25*z; //LLH if (j>0) y = LxLyHz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; if (i>0) x = LxLyHz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero; LxLyHz_n[i+j*dx+k*dx*dy] += 0.25*y + 0.25*x; } extern "C" __global__ void cu_iwt3df_LC2 (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t df1,df2,n; scalar_t xGreatZero,yGreatZero,zGreatZero; if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HHL df1 = HxHyLz_df1[i+j*dx+k*dx*dy]; df2 = HxHyLz_df2[i+j*dx+k*dx*dy]; n = HxHyLz_n[i+j*dx+k*dx*dy]; HxHyLz_n[i+j*dx+k*dx*dy] = df2; zGreatZero = k>0; 
HxHyLz_df1[i+j*dx+k*dx*dy] = df1+n-zGreatZero*0.125*df2; HxHyLz_df2[i+j*dx+k*dx*dy] = n-df1-zGreatZero*0.125*df2; //HLH df1 = HxLyHz_df1[i+j*dx+k*dx*dy]; df2 = HxLyHz_df2[i+j*dx+k*dx*dy]; n = HxLyHz_n[i+j*dx+k*dx*dy]; HxLyHz_df2[i+j*dx+k*dx*dy] = df2; yGreatZero = j>0; HxLyHz_n[i+j*dx+k*dx*dy] = df1+n-yGreatZero*0.125*df2; HxLyHz_df1[i+j*dx+k*dx*dy] = n-df1-yGreatZero*0.125*df2; //LHH df1 = LxHyHz_df1[i+j*dx+k*dx*dy]; df2 = LxHyHz_df2[i+j*dx+k*dx*dy]; n = LxHyHz_n[i+j*dx+k*dx*dy]; LxHyHz_df1[i+j*dx+k*dx*dy] = df2; xGreatZero = i>0; LxHyHz_df2[i+j*dx+k*dx*dy] = df1+n-xGreatZero*0.125*df2; LxHyHz_n[i+j*dx+k*dx*dy] = n-df1-xGreatZero*0.125*df2; } extern "C" __global__ void cu_iwt3df_LC2_diff (_data_t* HxHyLz_df1,_data_t* HxHyLz_df2,_data_t* HxHyLz_n,_data_t* HxLyHz_df1,_data_t* HxLyHz_df2,_data_t* HxLyHz_n,_data_t* LxHyHz_df1,_data_t* LxHyHz_df2,_data_t* LxHyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t x,y,z; _data_t zero = make_float2(0.f,0.f); if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HHL if (k>0) z = HxHyLz_n[i+j*dx+(k-1)*dx*dy]; else z = zero; HxHyLz_df1[i+j*dx+k*dx*dy] += 0.125*z; HxHyLz_df2[i+j*dx+k*dx*dy] += 0.125*z; //HLH if (j>0) y = HxLyHz_df2[i+(j-1)*dx+k*dx*dy]; else y = zero; HxLyHz_df1[i+j*dx+k*dx*dy] += 0.125*y; HxLyHz_n[i+j*dx+k*dx*dy] += 0.125*y; //LHH if (i>0) x = LxHyHz_df1[(i-1)+j*dx+k*dx*dy]; else x = zero; LxHyHz_df2[i+j*dx+k*dx*dy] += 0.125*x; LxHyHz_n[i+j*dx+k*dx*dy] += 0.125*x; } extern "C" __global__ void cu_iwt3df_LC3 (_data_t* HxHyHz_df1,_data_t* HxHyHz_df2,_data_t* HxHyHz_n,int dx, int dy, int dz) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; int k = blockIdx.z*blockDim.z+threadIdx.z; _data_t df1,df2,n; if ((i>=dx)||(j>=dy)||(k>=dz)) { return; } //HHH df1 = HxHyHz_df1[i+j*dx+k*dx*dy]; df2 = HxHyHz_df2[i+j*dx+k*dx*dy]; n = HxHyHz_n[i+j*dx+k*dx*dy]; HxHyHz_df1[i+j*dx+k*dx*dy] = n-df1; HxHyHz_df2[i+j*dx+k*dx*dy] = df2+n; HxHyHz_n[i+j*dx+k*dx*dy] = df1-df2+n; } extern "C" __global__ void cu_mult(_data_t* in, _data_t mult, int maxInd) { int ind = blockIdx.x*blockDim.x+threadIdx.x; if (ind > maxInd) { return; } in[ind] = in[ind]*mult; } extern "C" __global__ void cu_add_mult(_data_t* out, _data_t* in, _data_t mult, int maxInd) { int ind = blockIdx.x*blockDim.x+threadIdx.x; if (ind > maxInd) { return; } _data_t i = out[ind]; out[ind] = i+(out[ind]-i)*mult; } __global__ void cu_soft_thresh (_data_t* in, scalar_t thresh, int numMax) { int const i = threadIdx.x + blockDim.x*blockIdx.x; if (i>numMax) return; scalar_t norm = abs(in[i]); scalar_t red = norm - thresh; in[i] = (red > 0.f) ? ((red / norm) * (in[i])) : in[i]-in[i]; } __global__ void cu_circshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= dx*dy*dz) { return; } int indexShifted = (index+shift1+shift2*dx+shift3*dx*dy)%(dx*dy*dz); data[indexShifted] = dataCopy[index]; } __global__ void cu_circunshift(_data_t* data, _data_t* dataCopy, int dx, int dy, int dz,int shift1, int shift2,int shift3) { int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= dx*dy*dz) { return; } int indexShifted = (index+shift1+shift2*dx+shift3*dx*dy)%(dx*dy*dz); data[index] = dataCopy[indexShifted]; }
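// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// soft-thresholding rule applied by cu_soft_thresh()/dfsoftthresh_gpu() above,
// x -> x * max(|x| - t, 0) / |x|, shown as a standalone CUDA program for plain
// float data (the original operates on the complex _data_t type). The name
// soft_thresh_demo is hypothetical.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void soft_thresh_demo(float* x, float thresh, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float mag = fabsf(x[i]);
  float red = mag - thresh;                  // shrink the magnitude by thresh
  x[i] = (red > 0.f) ? (red / mag) * x[i]    // keep the sign, reduce the magnitude
                     : 0.f;                  // zero out coefficients below thresh
}

int main()
{
  const int n = 8;
  float h[n] = {-3.f, -1.f, -0.2f, 0.f, 0.1f, 0.5f, 2.f, 4.f};
  float* d;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
  soft_thresh_demo<<<1, 256>>>(d, 0.5f, n);
  cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g ", h[i]);  // expected: -2.5 -0.5 0 0 0 0 1.5 3.5
  printf("\n");
  cudaFree(d);
  return 0;
}
// ---------------------------------------------------------------------------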
28ea2eaef9baf61817db7baff6dedcf3814334c3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.

//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (y >= numCols || x >= numRows) {
    return;
  }
  int index = numRows*y +x;
  uchar4 color = rgbaImage[index];
  unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z);
  greyImage[index] = grey;
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  int blockWidth = 1;
  const dim3 blockSize(blockWidth, blockWidth, 1);
  int blocksX = numRows/blockWidth+1;
  int blocksY = numCols/blockWidth+1;  //TODO
  const dim3 gridSize(blocksX, blocksY, 1);  //TODO
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());
}
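// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original homework file):
// a host-side reference for the NTSC luma formula the kernel above implements,
// I = .299f*R + .587f*G + .114f*B, assuming a conventional row-major layout
// (index = row * numCols + col). rgba8 and rgba_to_grey_cpu are hypothetical names.
#include <cstddef>

struct rgba8 { unsigned char r, g, b, a; };   // mirrors uchar4: .x->R, .y->G, .z->B, .w->A

void rgba_to_grey_cpu(const rgba8* in, unsigned char* out,
                      size_t numRows, size_t numCols)
{
  for (size_t row = 0; row < numRows; ++row) {
    for (size_t col = 0; col < numCols; ++col) {
      size_t idx = row * numCols + col;       // row-major pixel index
      const rgba8 p = in[idx];                // the alpha channel is ignored
      out[idx] = (unsigned char)(0.299f * p.r + 0.587f * p.g + 0.114f * p.b);
    }
  }
}
// ---------------------------------------------------------------------------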
28ea2eaef9baf61817db7baff6dedcf3814334c3.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.

//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  if (y >= numCols || x >= numRows) {
    return;
  }
  int index = numRows*y +x;
  uchar4 color = rgbaImage[index];
  unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z);
  greyImage[index] = grey;
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  int blockWidth = 1;
  const dim3 blockSize(blockWidth, blockWidth, 1);
  int blocksX = numRows/blockWidth+1;
  int blocksY = numCols/blockWidth+1;  //TODO
  const dim3 gridSize(blocksX, blocksY, 1);  //TODO
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}
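// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original file): the usual
// ceiling-division launch configuration for covering a whole numRows x numCols
// image with square 2D blocks, one thread per pixel. grey_grid_for is a
// hypothetical helper; the kernel's bounds check is still required because the
// grid is rounded up past the image edges.
#include <cuda_runtime.h>

static inline dim3 grey_grid_for(size_t numRows, size_t numCols, int blockWidth)
{
  unsigned bx = (unsigned)((numRows + blockWidth - 1) / blockWidth);  // blocks along x (rows)
  unsigned by = (unsigned)((numCols + blockWidth - 1) / blockWidth);  // blocks along y (cols)
  return dim3(bx, by, 1);
}

// usage sketch, keeping the kernel's axis convention (x indexes rows, y indexes columns):
//   const int blockWidth = 16;
//   const dim3 blockSize(blockWidth, blockWidth, 1);
//   const dim3 gridSize = grey_grid_for(numRows, numCols, blockWidth);
//   rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
// ---------------------------------------------------------------------------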
80642c3eb5c7f56cc23b43f10b6638c6d220b287.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/detail/merge.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/strings/detail/merge.cuh> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/merge.h> #include <thrust/pair.h> #include <queue> #include <vector> #include "cudf/utilities/traits.hpp" namespace cudf { namespace detail { namespace { using detail::side; using index_type = detail::index_type; /** * @brief Merges the bits of two validity bitmasks. * * Merges the bits from two column_device_views into the destination validity buffer * according to `merged_indices` map such that bit `i` in `out_validity` * will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol` * if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise, * from `right_dcol`. * * `left_dcol` and `right_dcol` must not overlap. * * @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID) * @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID) * @param[in] left_dcol The left column_device_view whose bits will be merged * @param[in] right_dcol The right column_device_view whose bits will be merged * @param[out] out_validity The output validity buffer after merging the left and right buffers * @param[in] num_destination_rows The number of rows in the out_validity buffer * @param[in] merged_indices The map that indicates the source of the input and index * to be copied to the output. 
Length must be equal to `num_destination_rows` */ template <bool left_have_valids, bool right_have_valids> __global__ void materialize_merged_bitmask_kernel( column_device_view left_dcol, column_device_view right_dcol, bitmask_type* out_validity, size_type const num_destination_rows, index_type const* const __restrict__ merged_indices) { size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows); while (destination_row < num_destination_rows) { index_type const& merged_idx = merged_indices[destination_row]; side const src_side = thrust::get<0>(merged_idx); size_type const src_row = thrust::get<1>(merged_idx); bool const from_left{src_side == side::LEFT}; bool source_bit_is_valid{true}; if (left_have_valids && from_left) { source_bit_is_valid = left_dcol.is_valid_nocheck(src_row); } else if (right_have_valids && !from_left) { source_bit_is_valid = right_dcol.is_valid_nocheck(src_row); } // Use ballot to find all valid bits in this warp and create the output // bitmask element bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)}; // Only one thread writes output if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; } destination_row += blockDim.x * gridDim.x; active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows); } } void materialize_bitmask(column_view const& left_col, column_view const& right_col, bitmask_type* out_validity, size_type num_elements, index_type const* merged_indices, rmm::cuda_stream_view stream) { constexpr size_type BLOCK_SIZE{256}; detail::grid_1d grid_config{num_elements, BLOCK_SIZE}; auto p_left_dcol = column_device_view::create(left_col); auto p_right_dcol = column_device_view::create(right_col); auto left_valid = *p_left_dcol; auto right_valid = *p_right_dcol; if (left_col.has_nulls()) { if (right_col.has_nulls()) { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, true>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } else { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, false>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } } else { if (right_col.has_nulls()) { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<false, true>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } else { CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called."); } } CHECK_CUDA(stream.value()); } struct side_index_generator { side _side; __device__ index_type operator()(size_type i) const noexcept { return index_type{_side, i}; } }; /** * @brief Generates the row indices and source side (left or right) in accordance with the index * columns. 
* * * @tparam index_type Indicates the type to be used to collect index and side information; * @param[in] left_table The left table_view to be merged * @param[in] right_table The right table_view to be merged * @param[in] column_order Sort order types of index columns * @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the * index columns * @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls * (defaults to true) * @param[in] stream CUDA stream used for device memory operations and kernel launches. * * @return A device_uvector of merged indices */ index_vector generate_merged_indices(table_view const& left_table, table_view const& right_table, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, bool nullable = true, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { const size_type left_size = left_table.num_rows(); const size_type right_size = right_table.num_rows(); const size_type total_size = left_size + right_size; auto left_gen = side_index_generator{side::LEFT}; auto right_gen = side_index_generator{side::RIGHT}; auto left_begin = cudf::detail::make_counting_transform_iterator(0, left_gen); auto right_begin = cudf::detail::make_counting_transform_iterator(0, right_gen); index_vector merged_indices(total_size, stream); auto lhs_device_view = table_device_view::create(left_table, stream); auto rhs_device_view = table_device_view::create(right_table, stream); auto d_column_order = cudf::detail::make_device_uvector_async(column_order, stream); if (nullable) { auto d_null_precedence = cudf::detail::make_device_uvector_async(null_precedence, stream); auto ineq_op = detail::row_lexicographic_tagged_comparator<true>( *lhs_device_view, *rhs_device_view, d_column_order.data(), d_null_precedence.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } else { auto ineq_op = detail::row_lexicographic_tagged_comparator<false>( *lhs_device_view, *rhs_device_view, d_column_order.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } CHECK_CUDA(stream.value()); return merged_indices; } /** * @brief Generate merged column given row-order of merged tables * (ordered according to indices of key_cols) and the 2 columns to merge. */ struct column_merger { explicit column_merger(index_vector const& row_order) : row_order_(row_order) {} template <typename Element, CUDF_ENABLE_IF(not is_rep_layout_compatible<Element>())> std::unique_ptr<column> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { CUDF_FAIL("Unsupported type for merge."); } // column merger operator; // template <typename Element> std::enable_if_t<is_rep_layout_compatible<Element>(), std::unique_ptr<column>> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { auto lsz = lcol.size(); auto merged_size = lsz + rcol.size(); auto merged_col = cudf::detail::allocate_like(lcol.has_nulls() ? 
lcol : rcol, merged_size, cudf::mask_allocation_policy::RETAIN, stream, mr); //"gather" data from lcol, rcol according to row_order_ "map" //(directly calling gather() won't work because // lcol, rcol indices overlap!) // cudf::mutable_column_view merged_view = merged_col->mutable_view(); // initialize null_mask to all valid: // // Note: this initialization in conjunction with // _conditionally_ calling materialize_bitmask() below covers // the case materialize_merged_bitmask_kernel<false, false>() // which won't be called anymore (because of the _condition_ // below) // cudf::detail::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream); // set the null count: // merged_col->set_null_count(lcol.null_count() + rcol.null_count()); // to resolve view.data()'s types use: Element // auto const d_lcol = lcol.data<Element>(); auto const d_rcol = rcol.data<Element>(); // capture lcol, rcol // and "gather" into merged_view.data()[indx_merged] // from lcol or rcol, depending on side; // thrust::transform(rmm::exec_policy(stream), row_order_.begin(), row_order_.end(), merged_view.begin<Element>(), [d_lcol, d_rcol] __device__(index_type const& index_pair) { auto side = thrust::get<0>(index_pair); auto index = thrust::get<1>(index_pair); return side == side::LEFT ? d_lcol[index] : d_rcol[index]; }); // CAVEAT: conditional call below is erroneous without // set_null_mask() call (see TODO above): // if (lcol.has_nulls() || rcol.has_nulls()) { // resolve null mask: // materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return merged_col; } private: index_vector const& row_order_; }; // specialization for strings template <> std::unique_ptr<column> column_merger::operator()<cudf::string_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto column = strings::detail::merge<index_type>(strings_column_view(lcol), strings_column_view(rcol), row_order_.begin(), row_order_.end(), stream, mr); if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = column->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return column; } // specialization for dictionary template <> std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto result = cudf::dictionary::detail::merge( cudf::dictionary_column_view(lcol), cudf::dictionary_column_view(rcol), row_order_, stream, mr); // set the validity mask if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = result->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return result; } // specialization for structs template <> std::unique_ptr<column> column_merger::operator()<cudf::struct_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { // merge each child. 
auto const lhs = structs_column_view{lcol}; auto const rhs = structs_column_view{rcol}; auto it = cudf::detail::make_counting_transform_iterator( 0, [&, merger = column_merger{row_order_}](size_type i) { return cudf::type_dispatcher<dispatch_storage_type>( lhs.child(i).type(), merger, lhs.get_sliced_child(i), rhs.get_sliced_child(i), stream, mr); }); auto merged_children = std::vector<std::unique_ptr<column>>(it, it + lhs.num_children()); auto const merged_size = lcol.size() + rcol.size(); // materialize the output buffer rmm::device_buffer validity = lcol.has_nulls() || rcol.has_nulls() ? create_null_mask(merged_size, mask_state::UNINITIALIZED, stream, mr) : rmm::device_buffer{}; if (lcol.has_nulls() || rcol.has_nulls()) { materialize_bitmask(lcol, rcol, static_cast<bitmask_type*>(validity.data()), merged_size, row_order_.data(), stream); } return make_structs_column(merged_size, std::move(merged_children), lcol.null_count() + rcol.null_count(), std::move(validity), stream, mr); } using table_ptr_type = std::unique_ptr<cudf::table>; table_ptr_type merge(cudf::table_view const& left_table, cudf::table_view const& right_table, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // collect index columns for lhs, rhs, resp. // cudf::table_view index_left_view{left_table.select(key_cols)}; cudf::table_view index_right_view{right_table.select(key_cols)}; bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view); // extract merged row order according to indices: // auto const merged_indices = generate_merged_indices( index_left_view, index_right_view, column_order, null_precedence, nullable); // create merged table: // auto const n_cols = left_table.num_columns(); std::vector<std::unique_ptr<column>> merged_cols; merged_cols.reserve(n_cols); column_merger merger{merged_indices}; transform(left_table.begin(), left_table.end(), right_table.begin(), std::back_inserter(merged_cols), [&](auto const& left_col, auto const& right_col) { return cudf::type_dispatcher<dispatch_storage_type>( left_col.type(), merger, left_col, right_col, stream, mr); }); return std::make_unique<cudf::table>(std::move(merged_cols)); } struct merge_queue_item { table_view view; table_ptr_type table; // Priority is a separate member to ensure that moving from an object // does not change its priority (which would ruin the queue invariant) cudf::size_type priority = 0; merge_queue_item(table_view const& view, table_ptr_type&& table) : view{view}, table{std::move(table)}, priority{-view.num_rows()} { } bool operator<(merge_queue_item const& other) const { return priority < other.priority; } }; // Helper function to ensure that moving out of the priority_queue is "atomic" template <typename T> T top_and_pop(std::priority_queue<T>& q) { auto moved = std::move(const_cast<T&>(q.top())); q.pop(); return moved; } } // anonymous namespace table_ptr_type merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); } auto const& first_table = tables_to_merge.front(); auto const n_cols = first_table.num_columns(); CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(), 
tables_to_merge.cend(), [n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }), "Mismatched number of columns"); CUDF_EXPECTS( std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }), "Mismatched column types"); CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols"); CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols"); CUDF_EXPECTS(key_cols.size() == column_order.size(), "Mismatched size between key_cols and column_order"); // This utility will ensure all corresponding dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( tables_to_merge, stream, rmm::mr::get_current_device_resource()); auto merge_tables = matched.second; // A queue of (table view, table) pairs std::priority_queue<merge_queue_item> merge_queue; // The table pointer is null if we do not own the table (input tables) std::for_each(merge_tables.begin(), merge_tables.end(), [&](auto const& table) { if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type()); }); // If there is only one non-empty table_view, return its copy if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view); } // No inputs have rows, return a table with same columns as the first one if (merge_queue.empty()) { return empty_like(first_table); } // Pick the two smallest tables and merge them // Until there is only one table left in the queue while (merge_queue.size() > 1) { // To delete the intermediate table at the end of the block auto const left_table = top_and_pop(merge_queue); // Deallocated at the end of the block auto const right_table = top_and_pop(merge_queue); // Only use mr for the output table auto const& new_tbl_mr = merge_queue.empty() ? mr : rmm::mr::get_current_device_resource(); auto merged_table = merge(left_table.view, right_table.view, key_cols, column_order, null_precedence, stream, new_tbl_mr); auto const merged_table_view = merged_table->view(); merge_queue.emplace(merged_table_view, std::move(merged_table)); } return std::move(top_and_pop(merge_queue).table); } } // namespace detail std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::merge( tables_to_merge, key_cols, column_order, null_precedence, rmm::cuda_stream_default, mr); } } // namespace cudf
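// ---------------------------------------------------------------------------
// Editor's note (standalone illustrative sketch, not part of the cudf source):
// the warp-ballot pattern used by materialize_merged_bitmask_kernel above.
// Each thread evaluates one validity bit, __ballot_sync packs the 32 bits of
// the warp into one bitmask word, and lane 0 writes that word. pack_valid_bits
// is a hypothetical name.
#include <cuda_runtime.h>
#include <cstdint>

__global__ void pack_valid_bits(const int* values, uint32_t* out_mask, int n)
{
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  // ballot first with the full mask so every lane of the warp participates,
  // then restrict the next ballot to the lanes that actually hold a row
  uint32_t active = __ballot_sync(0xffffffffu, row < n);
  if (row >= n) return;
  bool is_valid = (values[row] != 0);                 // per-row validity predicate
  uint32_t word = __ballot_sync(active, is_valid);    // 32 rows -> one bitmask word
  if ((threadIdx.x % warpSize) == 0)                  // one writer per warp
    out_mask[row / 32] = word;
}

// usage sketch: the block size must be a multiple of 32 and out_mask must hold
// (n + 31) / 32 words, e.g.
//   pack_valid_bits<<<(n + 255) / 256, 256>>>(d_values, d_mask, n);
// ---------------------------------------------------------------------------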
80642c3eb5c7f56cc23b43f10b6638c6d220b287.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/detail/merge.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/strings/detail/merge.cuh> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/merge.h> #include <thrust/pair.h> #include <queue> #include <vector> #include "cudf/utilities/traits.hpp" namespace cudf { namespace detail { namespace { using detail::side; using index_type = detail::index_type; /** * @brief Merges the bits of two validity bitmasks. * * Merges the bits from two column_device_views into the destination validity buffer * according to `merged_indices` map such that bit `i` in `out_validity` * will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol` * if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise, * from `right_dcol`. * * `left_dcol` and `right_dcol` must not overlap. * * @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID) * @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID) * @param[in] left_dcol The left column_device_view whose bits will be merged * @param[in] right_dcol The right column_device_view whose bits will be merged * @param[out] out_validity The output validity buffer after merging the left and right buffers * @param[in] num_destination_rows The number of rows in the out_validity buffer * @param[in] merged_indices The map that indicates the source of the input and index * to be copied to the output. 
Length must be equal to `num_destination_rows` */ template <bool left_have_valids, bool right_have_valids> __global__ void materialize_merged_bitmask_kernel( column_device_view left_dcol, column_device_view right_dcol, bitmask_type* out_validity, size_type const num_destination_rows, index_type const* const __restrict__ merged_indices) { size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows); while (destination_row < num_destination_rows) { index_type const& merged_idx = merged_indices[destination_row]; side const src_side = thrust::get<0>(merged_idx); size_type const src_row = thrust::get<1>(merged_idx); bool const from_left{src_side == side::LEFT}; bool source_bit_is_valid{true}; if (left_have_valids && from_left) { source_bit_is_valid = left_dcol.is_valid_nocheck(src_row); } else if (right_have_valids && !from_left) { source_bit_is_valid = right_dcol.is_valid_nocheck(src_row); } // Use ballot to find all valid bits in this warp and create the output // bitmask element bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)}; // Only one thread writes output if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; } destination_row += blockDim.x * gridDim.x; active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows); } } void materialize_bitmask(column_view const& left_col, column_view const& right_col, bitmask_type* out_validity, size_type num_elements, index_type const* merged_indices, rmm::cuda_stream_view stream) { constexpr size_type BLOCK_SIZE{256}; detail::grid_1d grid_config{num_elements, BLOCK_SIZE}; auto p_left_dcol = column_device_view::create(left_col); auto p_right_dcol = column_device_view::create(right_col); auto left_valid = *p_left_dcol; auto right_valid = *p_right_dcol; if (left_col.has_nulls()) { if (right_col.has_nulls()) { materialize_merged_bitmask_kernel<true, true> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } else { materialize_merged_bitmask_kernel<true, false> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } } else { if (right_col.has_nulls()) { materialize_merged_bitmask_kernel<false, true> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } else { CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called."); } } CHECK_CUDA(stream.value()); } struct side_index_generator { side _side; __device__ index_type operator()(size_type i) const noexcept { return index_type{_side, i}; } }; /** * @brief Generates the row indices and source side (left or right) in accordance with the index * columns. 
* * * @tparam index_type Indicates the type to be used to collect index and side information; * @param[in] left_table The left table_view to be merged * @param[in] right_table The right table_view to be merged * @param[in] column_order Sort order types of index columns * @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the * index columns * @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls * (defaults to true) * @param[in] stream CUDA stream used for device memory operations and kernel launches. * * @return A device_uvector of merged indices */ index_vector generate_merged_indices(table_view const& left_table, table_view const& right_table, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, bool nullable = true, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { const size_type left_size = left_table.num_rows(); const size_type right_size = right_table.num_rows(); const size_type total_size = left_size + right_size; auto left_gen = side_index_generator{side::LEFT}; auto right_gen = side_index_generator{side::RIGHT}; auto left_begin = cudf::detail::make_counting_transform_iterator(0, left_gen); auto right_begin = cudf::detail::make_counting_transform_iterator(0, right_gen); index_vector merged_indices(total_size, stream); auto lhs_device_view = table_device_view::create(left_table, stream); auto rhs_device_view = table_device_view::create(right_table, stream); auto d_column_order = cudf::detail::make_device_uvector_async(column_order, stream); if (nullable) { auto d_null_precedence = cudf::detail::make_device_uvector_async(null_precedence, stream); auto ineq_op = detail::row_lexicographic_tagged_comparator<true>( *lhs_device_view, *rhs_device_view, d_column_order.data(), d_null_precedence.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } else { auto ineq_op = detail::row_lexicographic_tagged_comparator<false>( *lhs_device_view, *rhs_device_view, d_column_order.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } CHECK_CUDA(stream.value()); return merged_indices; } /** * @brief Generate merged column given row-order of merged tables * (ordered according to indices of key_cols) and the 2 columns to merge. */ struct column_merger { explicit column_merger(index_vector const& row_order) : row_order_(row_order) {} template <typename Element, CUDF_ENABLE_IF(not is_rep_layout_compatible<Element>())> std::unique_ptr<column> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { CUDF_FAIL("Unsupported type for merge."); } // column merger operator; // template <typename Element> std::enable_if_t<is_rep_layout_compatible<Element>(), std::unique_ptr<column>> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { auto lsz = lcol.size(); auto merged_size = lsz + rcol.size(); auto merged_col = cudf::detail::allocate_like(lcol.has_nulls() ? 
lcol : rcol, merged_size, cudf::mask_allocation_policy::RETAIN, stream, mr); //"gather" data from lcol, rcol according to row_order_ "map" //(directly calling gather() won't work because // lcol, rcol indices overlap!) // cudf::mutable_column_view merged_view = merged_col->mutable_view(); // initialize null_mask to all valid: // // Note: this initialization in conjunction with // _conditionally_ calling materialize_bitmask() below covers // the case materialize_merged_bitmask_kernel<false, false>() // which won't be called anymore (because of the _condition_ // below) // cudf::detail::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream); // set the null count: // merged_col->set_null_count(lcol.null_count() + rcol.null_count()); // to resolve view.data()'s types use: Element // auto const d_lcol = lcol.data<Element>(); auto const d_rcol = rcol.data<Element>(); // capture lcol, rcol // and "gather" into merged_view.data()[indx_merged] // from lcol or rcol, depending on side; // thrust::transform(rmm::exec_policy(stream), row_order_.begin(), row_order_.end(), merged_view.begin<Element>(), [d_lcol, d_rcol] __device__(index_type const& index_pair) { auto side = thrust::get<0>(index_pair); auto index = thrust::get<1>(index_pair); return side == side::LEFT ? d_lcol[index] : d_rcol[index]; }); // CAVEAT: conditional call below is erroneous without // set_null_mask() call (see TODO above): // if (lcol.has_nulls() || rcol.has_nulls()) { // resolve null mask: // materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return merged_col; } private: index_vector const& row_order_; }; // specialization for strings template <> std::unique_ptr<column> column_merger::operator()<cudf::string_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto column = strings::detail::merge<index_type>(strings_column_view(lcol), strings_column_view(rcol), row_order_.begin(), row_order_.end(), stream, mr); if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = column->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return column; } // specialization for dictionary template <> std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto result = cudf::dictionary::detail::merge( cudf::dictionary_column_view(lcol), cudf::dictionary_column_view(rcol), row_order_, stream, mr); // set the validity mask if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = result->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return result; } // specialization for structs template <> std::unique_ptr<column> column_merger::operator()<cudf::struct_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { // merge each child. 
auto const lhs = structs_column_view{lcol}; auto const rhs = structs_column_view{rcol}; auto it = cudf::detail::make_counting_transform_iterator( 0, [&, merger = column_merger{row_order_}](size_type i) { return cudf::type_dispatcher<dispatch_storage_type>( lhs.child(i).type(), merger, lhs.get_sliced_child(i), rhs.get_sliced_child(i), stream, mr); }); auto merged_children = std::vector<std::unique_ptr<column>>(it, it + lhs.num_children()); auto const merged_size = lcol.size() + rcol.size(); // materialize the output buffer rmm::device_buffer validity = lcol.has_nulls() || rcol.has_nulls() ? create_null_mask(merged_size, mask_state::UNINITIALIZED, stream, mr) : rmm::device_buffer{}; if (lcol.has_nulls() || rcol.has_nulls()) { materialize_bitmask(lcol, rcol, static_cast<bitmask_type*>(validity.data()), merged_size, row_order_.data(), stream); } return make_structs_column(merged_size, std::move(merged_children), lcol.null_count() + rcol.null_count(), std::move(validity), stream, mr); } using table_ptr_type = std::unique_ptr<cudf::table>; table_ptr_type merge(cudf::table_view const& left_table, cudf::table_view const& right_table, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // collect index columns for lhs, rhs, resp. // cudf::table_view index_left_view{left_table.select(key_cols)}; cudf::table_view index_right_view{right_table.select(key_cols)}; bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view); // extract merged row order according to indices: // auto const merged_indices = generate_merged_indices( index_left_view, index_right_view, column_order, null_precedence, nullable); // create merged table: // auto const n_cols = left_table.num_columns(); std::vector<std::unique_ptr<column>> merged_cols; merged_cols.reserve(n_cols); column_merger merger{merged_indices}; transform(left_table.begin(), left_table.end(), right_table.begin(), std::back_inserter(merged_cols), [&](auto const& left_col, auto const& right_col) { return cudf::type_dispatcher<dispatch_storage_type>( left_col.type(), merger, left_col, right_col, stream, mr); }); return std::make_unique<cudf::table>(std::move(merged_cols)); } struct merge_queue_item { table_view view; table_ptr_type table; // Priority is a separate member to ensure that moving from an object // does not change its priority (which would ruin the queue invariant) cudf::size_type priority = 0; merge_queue_item(table_view const& view, table_ptr_type&& table) : view{view}, table{std::move(table)}, priority{-view.num_rows()} { } bool operator<(merge_queue_item const& other) const { return priority < other.priority; } }; // Helper function to ensure that moving out of the priority_queue is "atomic" template <typename T> T top_and_pop(std::priority_queue<T>& q) { auto moved = std::move(const_cast<T&>(q.top())); q.pop(); return moved; } } // anonymous namespace table_ptr_type merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); } auto const& first_table = tables_to_merge.front(); auto const n_cols = first_table.num_columns(); CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(), 
tables_to_merge.cend(), [n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }), "Mismatched number of columns"); CUDF_EXPECTS( std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }), "Mismatched column types"); CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols"); CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols"); CUDF_EXPECTS(key_cols.size() == column_order.size(), "Mismatched size between key_cols and column_order"); // This utility will ensure all corresponding dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( tables_to_merge, stream, rmm::mr::get_current_device_resource()); auto merge_tables = matched.second; // A queue of (table view, table) pairs std::priority_queue<merge_queue_item> merge_queue; // The table pointer is null if we do not own the table (input tables) std::for_each(merge_tables.begin(), merge_tables.end(), [&](auto const& table) { if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type()); }); // If there is only one non-empty table_view, return its copy if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view); } // No inputs have rows, return a table with same columns as the first one if (merge_queue.empty()) { return empty_like(first_table); } // Pick the two smallest tables and merge them // Until there is only one table left in the queue while (merge_queue.size() > 1) { // To delete the intermediate table at the end of the block auto const left_table = top_and_pop(merge_queue); // Deallocated at the end of the block auto const right_table = top_and_pop(merge_queue); // Only use mr for the output table auto const& new_tbl_mr = merge_queue.empty() ? mr : rmm::mr::get_current_device_resource(); auto merged_table = merge(left_table.view, right_table.view, key_cols, column_order, null_precedence, stream, new_tbl_mr); auto const merged_table_view = merged_table->view(); merge_queue.emplace(merged_table_view, std::move(merged_table)); } return std::move(top_and_pop(merge_queue).table); } } // namespace detail std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::merge( tables_to_merge, key_cols, column_order, null_precedence, rmm::cuda_stream_default, mr); } } // namespace cudf
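The public entry point defined at the end of this file takes a vector of pre-sorted tables plus the key columns, their sort orders, and null precedence. A minimal hedged usage sketch follows; the header paths and the explicit memory-resource argument are assumptions based on the signature above, not taken from this file.

// Hedged usage sketch (not from the source): merge two tables that are already
// sorted ascending on their first column, with nulls ordered before non-nulls.
#include <cudf/merge.hpp>               // assumed public header for cudf::merge
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>

std::unique_ptr<cudf::table> merge_two_sorted(cudf::table_view const& lhs,
                                              cudf::table_view const& rhs)
{
  std::vector<cudf::table_view> tables{lhs, rhs};
  std::vector<cudf::size_type> key_cols{0};                        // merge key: column 0
  std::vector<cudf::order> column_order{cudf::order::ASCENDING};   // one entry per key column
  std::vector<cudf::null_order> null_precedence{cudf::null_order::BEFORE};
  return cudf::merge(tables, key_cols, column_order, null_precedence,
                     rmm::mr::get_current_device_resource());
}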
bedd387cccc6653a007cc5ed64468c5d883f57b8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

void initWith(float num, float *a, int N)
{
  for(int i = 0; i < N; ++i)
  {
    a[i] = num;
  }
}

__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  int inx = threadIdx.x + blockIdx.x * blockDim.x;
  int gridStride = gridDim.x * blockDim.x;

  for(int i = inx; i < N; i += gridStride)
  {
    result[i] = a[i] + b[i];
  }
}

void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("SUCCESS! All values added correctly.\n");
}

int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);

  size_t threads_per_block = 512;
  size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;

  float *a;
  float *b;
  float *c;

  hipMallocManaged(&a, size);
  hipMallocManaged(&b, size);
  hipMallocManaged(&c, size);

  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);

  hipLaunchKernelGGL(( addVectorsInto), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, c, a, b, N);

  hipDeviceSynchronize();

  checkElementsAre(7, c, N);

  hipFree(a);
  hipFree(b);
  hipFree(c);
}
bedd387cccc6653a007cc5ed64468c5d883f57b8.cu
#include <stdio.h>

void initWith(float num, float *a, int N)
{
  for(int i = 0; i < N; ++i)
  {
    a[i] = num;
  }
}

__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  int inx = threadIdx.x + blockIdx.x * blockDim.x;
  int gridStride = gridDim.x * blockDim.x;

  for(int i = inx; i < N; i += gridStride)
  {
    result[i] = a[i] + b[i];
  }
}

void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("SUCCESS! All values added correctly.\n");
}

int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);

  size_t threads_per_block = 512;
  size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;

  float *a;
  float *b;
  float *c;

  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  cudaMallocManaged(&c, size);

  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);

  addVectorsInto<<<number_of_blocks, threads_per_block>>>(c, a, b, N);

  cudaDeviceSynchronize();

  checkElementsAre(7, c, N);

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
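The grid-stride vector-add above launches the kernel and synchronizes without ever consulting the CUDA runtime for errors. A minimal hedged sketch of adding launch and execution checks; the CHECK_CUDA macro name is an illustrative assumption, not part of the original file.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Hypothetical helper: abort with a message on any CUDA runtime error.
#define CHECK_CUDA(call)                                             \
  do {                                                               \
    cudaError_t err_ = (call);                                       \
    if (err_ != cudaSuccess) {                                       \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
              cudaGetErrorString(err_), __FILE__, __LINE__);         \
      exit(1);                                                       \
    }                                                                \
  } while (0)

// Usage around the existing launch in main():
//   addVectorsInto<<<number_of_blocks, threads_per_block>>>(c, a, b, N);
//   CHECK_CUDA(cudaGetLastError());        // catches launch-configuration errors
//   CHECK_CUDA(cudaDeviceSynchronize());   // catches asynchronous execution errors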
5cdbab7d5e2a57d43df4178d73b484170f7b403b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void add2_kernel(float* c,
                            const float* a,
                            const float* b,
                            int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
            i < n; i += gridDim.x * blockDim.x) {
        c[i] = a[i] + b[i];
    }
}

void launch_add2(float* c,
                 const float* a,
                 const float* b,
                 int n) {
    dim3 grid((n + 1023) / 1024);
    dim3 block(1024);
    hipLaunchKernelGGL(( add2_kernel), dim3(grid), dim3(block), 0, 0, c, a, b, n);
}
5cdbab7d5e2a57d43df4178d73b484170f7b403b.cu
__global__ void add2_kernel(float* c,
                            const float* a,
                            const float* b,
                            int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
            i < n; i += gridDim.x * blockDim.x) {
        c[i] = a[i] + b[i];
    }
}

void launch_add2(float* c,
                 const float* a,
                 const float* b,
                 int n) {
    dim3 grid((n + 1023) / 1024);
    dim3 block(1024);
    add2_kernel<<<grid, block>>>(c, a, b, n);
}
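launch_add2 expects device pointers and sizes its grid internally as (n + 1023) / 1024 blocks of 1024 threads. A hedged host-side driver sketch follows; the helper name and the host/device copy choreography are assumptions for illustration, not taken from the file above.

#include <cuda_runtime.h>

void launch_add2(float* c, const float* a, const float* b, int n);  // defined in the file above

// Hedged usage sketch: stage host data on the device, run the launcher, copy back.
void add2_on_device(float* h_c, const float* h_a, const float* h_b, int n) {
    float *d_a, *d_b, *d_c;
    size_t bytes = (size_t)n * sizeof(float);
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    launch_add2(d_c, d_a, d_b, n);                         // kernel launch, asynchronous
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);   // blocking copy, implicit sync
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}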
e901d1c24abed4db0aeb5433d8ef8cd446a31313.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include<time.h> #include<cstring> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define NUM_THREADS 512 bool InitCUDA() { int count; hipGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for (i = 0; i < count; i++) { hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } __global__ static void matrixmul(const int* a, size_t lda, const int* b, size_t ldb, int* ans, size_t ldans, int n) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; const int row = idx / n; const int column = idx % n; int i; if (row < n && column < n) { int t = 0; for (i = 0; i < n; i++) { t += a[row * lda + i] * b[i * ldb + column]; } ans[row * ldans + column] = t; } } int *cudamatrixmul(int *ma, int *mb, int n) { int *gpuma, *gpumb, *gpuans ,*ans; ans = (int*)malloc(sizeof(int)*n*n); hipMalloc((void**)&gpuma, sizeof(int) * n * n); hipMalloc((void**)&gpumb, sizeof(int) * n * n); hipMalloc((void**)&gpuans, sizeof(int) * n * n); hipMemcpy2D(gpuma, sizeof(int) * n, ma, sizeof(int) * n, sizeof(int) * n, n, hipMemcpyHostToDevice); hipMemcpy2D(gpumb, sizeof(int) * n, mb, sizeof(int) * n, sizeof(int) * n, n, hipMemcpyHostToDevice); int blocks = (n + NUM_THREADS - 1) / NUM_THREADS; matrixmul << <blocks * n, NUM_THREADS >> > (gpuma, n, gpumb, n, gpuans, n, n); hipMemcpy2D(ans, sizeof(int) * n, gpuans, sizeof(int) * n, sizeof(int) * n, n, hipMemcpyDeviceToHost); hipFree(gpuma); hipFree(gpumb); hipFree(gpuans); return ans; } int* strassen(int *ma, int *mb, int t) { int *m, *a, *b, *ans; a = (int*)malloc(sizeof(int) * t / 2 * t / 2); b = (int*)malloc(sizeof(int) * t / 2 * t / 2); m = (int*)malloc(sizeof(int) * t / 2 * t / 2); ans = (int*)malloc(sizeof(int) * t * t); int i, j, k; for (i = 0; i < t; i++) for (j = 0; j < t; j++) ans[i*t + j] = 0; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j + t / 2] - mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//1 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j] + ma[i*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//2 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] -= m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + t / 2)*t + j] + ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j]; m = cudamatrixmul(a, b, t / 2);//3 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] -= m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + 
t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j] - mb[i*t + j]; m = cudamatrixmul(a, b, t / 2);//4 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j] + ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j] + mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//5 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j + t / 2] - ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j] + mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//6 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + t / 2)*t + j] - ma[i*t + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j] + mb[i*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//7 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; free(m); free(a); free(b); return ans; } int main() { if (!InitCUDA()) { return 0; } int *ma, *mb, *ans; int n = 1035, t; clock_t start, end; while (n < 1041) { start = clock(); if (n % 2 == 1) t = n + 1; else t = n; ma = (int*)malloc(sizeof(int) * t * t); mb = (int*)malloc(sizeof(int) * t * t); ans = (int*)malloc(sizeof(int) * t * t); srand(0); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) { ma[i * t + j] = rand() % 10; mb[i * t + j] = rand() % 10; } for (int i = n; i < t; i++) for (int j = 0; j < n; j++) { ma[i*t + j] = 0; mb[i*t + j] = 0; } for (int i = 0; i < t; i++) for (int j = n; j < t; j++) { ma[i*t + j] = 0; mb[i*t + j] = 0; } ans = strassen(ma, mb, t); free(ma); free(mb); free(ans); end = clock(); int s = (int)start; int e = (int)end; printf("%d\n", e - s); n++; } return 0; }
e901d1c24abed4db0aeb5433d8ef8cd446a31313.cu
#include <stdio.h> #include <stdlib.h> #include<time.h> #include<cstring> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define NUM_THREADS 512 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for (i = 0; i < count; i++) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } __global__ static void matrixmul(const int* a, size_t lda, const int* b, size_t ldb, int* ans, size_t ldans, int n) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; const int row = idx / n; const int column = idx % n; int i; if (row < n && column < n) { int t = 0; for (i = 0; i < n; i++) { t += a[row * lda + i] * b[i * ldb + column]; } ans[row * ldans + column] = t; } } int *cudamatrixmul(int *ma, int *mb, int n) { int *gpuma, *gpumb, *gpuans ,*ans; ans = (int*)malloc(sizeof(int)*n*n); cudaMalloc((void**)&gpuma, sizeof(int) * n * n); cudaMalloc((void**)&gpumb, sizeof(int) * n * n); cudaMalloc((void**)&gpuans, sizeof(int) * n * n); cudaMemcpy2D(gpuma, sizeof(int) * n, ma, sizeof(int) * n, sizeof(int) * n, n, cudaMemcpyHostToDevice); cudaMemcpy2D(gpumb, sizeof(int) * n, mb, sizeof(int) * n, sizeof(int) * n, n, cudaMemcpyHostToDevice); int blocks = (n + NUM_THREADS - 1) / NUM_THREADS; matrixmul << <blocks * n, NUM_THREADS >> > (gpuma, n, gpumb, n, gpuans, n, n); cudaMemcpy2D(ans, sizeof(int) * n, gpuans, sizeof(int) * n, sizeof(int) * n, n, cudaMemcpyDeviceToHost); cudaFree(gpuma); cudaFree(gpumb); cudaFree(gpuans); return ans; } int* strassen(int *ma, int *mb, int t) { int *m, *a, *b, *ans; a = (int*)malloc(sizeof(int) * t / 2 * t / 2); b = (int*)malloc(sizeof(int) * t / 2 * t / 2); m = (int*)malloc(sizeof(int) * t / 2 * t / 2); ans = (int*)malloc(sizeof(int) * t * t); int i, j, k; for (i = 0; i < t; i++) for (j = 0; j < t; j++) ans[i*t + j] = 0; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j + t / 2] - mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//1 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j] + ma[i*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//2 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] -= m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + t / 2)*t + j] + ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j]; m = cudamatrixmul(a, b, t / 2);//3 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] -= m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; 
i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j] - mb[i*t + j]; m = cudamatrixmul(a, b, t / 2);//4 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j] + ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j] + mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//5 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[i*t + j + t / 2] - ma[(i + t / 2)*t + j + t / 2]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[(i + t / 2)*t + j] + mb[(i + t / 2)*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//6 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[i*t + j] += m[i*t / 2 + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) a[i*t / 2 + j] = ma[(i + t / 2)*t + j] - ma[i*t + j]; for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) b[i*t / 2 + j] = mb[i*t + j] + mb[i*t + j + t / 2]; m = cudamatrixmul(a, b, t / 2);//7 for (i = 0; i < t / 2; i++) for (j = 0; j < t / 2; j++) ans[(i + t / 2)*t + j + t / 2] += m[i*t / 2 + j]; free(m); free(a); free(b); return ans; } int main() { if (!InitCUDA()) { return 0; } int *ma, *mb, *ans; int n = 1035, t; clock_t start, end; while (n < 1041) { start = clock(); if (n % 2 == 1) t = n + 1; else t = n; ma = (int*)malloc(sizeof(int) * t * t); mb = (int*)malloc(sizeof(int) * t * t); ans = (int*)malloc(sizeof(int) * t * t); srand(0); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) { ma[i * t + j] = rand() % 10; mb[i * t + j] = rand() % 10; } for (int i = n; i < t; i++) for (int j = 0; j < n; j++) { ma[i*t + j] = 0; mb[i*t + j] = 0; } for (int i = 0; i < t; i++) for (int j = n; j < t; j++) { ma[i*t + j] = 0; mb[i*t + j] = 0; } ans = strassen(ma, mb, t); free(ma); free(mb); free(ans); end = clock(); int s = (int)start; int e = (int)end; printf("%d\n", e - s); n++; } return 0; }
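The seven cudamatrixmul calls tagged //1 through //7 inside strassen() compute the seven half-size block products of Strassen's algorithm. In the standard textbook formulation (the code's numbering, operand choices, and signs are a permutation of this, so the labels do not map one-to-one) the products and output quadrants are:

M_1 = (A_{11} + A_{22})(B_{11} + B_{22})
M_2 = (A_{21} + A_{22}) B_{11}
M_3 = A_{11} (B_{12} - B_{22})
M_4 = A_{22} (B_{21} - B_{11})
M_5 = (A_{11} + A_{12}) B_{22}
M_6 = (A_{21} - A_{11})(B_{11} + B_{12})
M_7 = (A_{12} - A_{22})(B_{21} + B_{22})

C_{11} = M_1 + M_4 - M_5 + M_7
C_{12} = M_3 + M_5
C_{21} = M_2 + M_4
C_{22} = M_1 - M_2 + M_3 + M_6

Each product is evaluated on the GPU by cudamatrixmul, while the additions and subtractions of the half-size blocks are accumulated on the host into ans[].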
476469a8769ac5a4217785d0da7c26037a8de673.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************** * Numerical Solution for the Cubic Nonlinear Schrodinger Equation * * using second order split step Fourier method. * * Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. * **********************************************************************************/ #include "../lib/cu_helpers.h" #include <hipfft.h> // Grid Parameters #define XN 32 // Number of Fourier modes #define TN 10000 // Number of temporal nodes #define LX 10.0 // x-spatial domain [-LX,LX) #define TT 10.0 // Max time #define DX (2*LX / XN) // x-spatial step size #define DT (TT / TN) // temporal step size // Timing parameters #define IRVL 100 // Timing interval. Take a reading every N iterations. // Output files #define PLOT_F "gpu_fft_plot.m" #define TIME_F argv[1] // Function prototypes __global__ void nonlin(hipfftDoubleComplex *psi, double dt, int xn); __global__ void lin(hipfftDoubleComplex *psi, double *k2, double dt, int xn); __global__ void normalize(hipfftDoubleComplex *psi, int size); int main(int argc, char *argv[]) { // Timing info hipEvent_t begin_event, end_event; hipEventCreate(&begin_event); hipEventCreate(&end_event); // Print basic info about simulation printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX)); // Allocate host arrays double *h_x = (double*)malloc(sizeof(double) * XN); double *h_k2 = (double*)malloc(sizeof(double) * XN); double *h_kx = (double*)malloc(XN * sizeof(double)); float *h_time = (float*)malloc(sizeof(float) * TN/IRVL); hipfftDoubleComplex *h_psi = (hipfftDoubleComplex*) malloc(sizeof(hipfftDoubleComplex)*XN); hipfftDoubleComplex *h_psi_0 = (hipfftDoubleComplex*) malloc(sizeof(hipfftDoubleComplex)*XN); // Create transform plans hipfftHandle plan; CUFFT_SAFE_CALL(hipfftPlan1d(&plan, XN, HIPFFT_Z2Z, 1)); // Create wave number double dkx = 2*M_PI/XN/DX; for(int i = XN/2; i >= 0; i--) h_kx[XN/2 - i]=(XN/2 - i) * dkx; for(int i = XN/2+1; i < XN; i++) h_kx[i]=(i - XN) * dkx; // Initial conditions on host for(int i = 0; i < XN; i++) { h_x[i] = (i-XN/2)*DX; h_psi[i].x = sqrt(2)/cosh(h_x[i]); //h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0)); h_psi[i].y = 0; h_psi_0[i].x = h_psi[i].x; h_psi_0[i].y = h_psi[i].y; h_k2[i] = h_kx[i]*h_kx[i]; } // Allocate device arrays and copy from host hipfftDoubleComplex *d_psi; double *d_k2; CUDAR_SAFE_CALL(hipMalloc(&d_psi, sizeof(hipfftDoubleComplex)*XN)); CUDAR_SAFE_CALL(hipMalloc(&d_k2, sizeof(double)*XN)); CUDAR_SAFE_CALL(hipMemcpy(d_psi, h_psi, sizeof(hipfftDoubleComplex)*XN, hipMemcpyHostToDevice)); CUDAR_SAFE_CALL(hipMemcpy(d_k2, h_k2, sizeof(double)*XN, hipMemcpyHostToDevice)); // Initialize the grid dim3 threadsPerBlock(128,1,1); dim3 blocksPerGrid((XN + 127)/128,1,1); // Forward transform CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_FORWARD)); // Timing starts here float time_value; hipEventRecord(begin_event, 0); // Start time evolution for (int i = 1; i <= TN; i++) { // Solve linear part hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Backward transform CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_BACKWARD)); // Normalize the transform hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN); #if CUDAR_ERROR_CHECKING 
CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve nonlinear part hipLaunchKernelGGL(( nonlin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, DT, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Forward transform CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_FORWARD)); // Solve linear part hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Print time at specific intervals if(i % IRVL == 0) { hipEventRecord(end_event, 0); hipEventSynchronize(end_event); hipEventElapsedTime(&time_value, begin_event, end_event); h_time[i/IRVL-1] = time_value; } } // Plot timing results print_time(h_time, TN, IRVL, TIME_F); // Backward tranform to retreive data CUFFT_SAFE_CALL(hipfftExecZ2Z(plan, d_psi, d_psi, HIPFFT_BACKWARD)); // Normalize the transform hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(hipPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Copy results to device CUDAR_SAFE_CALL(hipMemcpy(h_psi, d_psi, sizeof(hipfftDoubleComplex)*XN, hipMemcpyDeviceToHost)); // Plot results cm_plot_1d(h_psi_0, h_psi, LX, XN, PLOT_F); // Clean up CUFFT_SAFE_CALL(hipfftDestroy(plan)); free(h_x); free(h_k2); free(h_kx); free(h_psi_0); free(h_psi); free(h_time); CUDAR_SAFE_CALL(hipFree(d_psi)); CUDAR_SAFE_CALL(hipFree(d_k2)); return 0; } __global__ void nonlin(hipfftDoubleComplex *psi, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) (needs fixing) //if (i >= xn - 1 || i == 0) return; if (i >= xn) return; double psi2 = cuCabs(psi[i])*cuCabs(psi[i]); psi[i] = cuCmul(psi[i], make_cuDoubleComplex(cos(psi2*dt), sin(psi2*dt))); } __global__ void lin(hipfftDoubleComplex *psi, double *k2, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) (needs fixing) //if (i >= xn - 1 || i == 0) return; if (i >= xn) return; psi[i] = cuCmul(psi[i], make_cuDoubleComplex(cos(k2[i]*dt), -sin(k2[i]*dt))); } __global__ void normalize(hipfftDoubleComplex *psi, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Stay within range since grid might be larger if (i >= size) return; psi[i].x = psi[i].x/size; psi[i].y = psi[i].y/size; }
476469a8769ac5a4217785d0da7c26037a8de673.cu
/********************************************************************************** * Numerical Solution for the Cubic Nonlinear Schrodinger Equation * * using second order split step Fourier method. * * Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. * **********************************************************************************/ #include "../lib/cu_helpers.h" #include <cufft.h> // Grid Parameters #define XN 32 // Number of Fourier modes #define TN 10000 // Number of temporal nodes #define LX 10.0 // x-spatial domain [-LX,LX) #define TT 10.0 // Max time #define DX (2*LX / XN) // x-spatial step size #define DT (TT / TN) // temporal step size // Timing parameters #define IRVL 100 // Timing interval. Take a reading every N iterations. // Output files #define PLOT_F "gpu_fft_plot.m" #define TIME_F argv[1] // Function prototypes __global__ void nonlin(cufftDoubleComplex *psi, double dt, int xn); __global__ void lin(cufftDoubleComplex *psi, double *k2, double dt, int xn); __global__ void normalize(cufftDoubleComplex *psi, int size); int main(int argc, char *argv[]) { // Timing info cudaEvent_t begin_event, end_event; cudaEventCreate(&begin_event); cudaEventCreate(&end_event); // Print basic info about simulation printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX)); // Allocate host arrays double *h_x = (double*)malloc(sizeof(double) * XN); double *h_k2 = (double*)malloc(sizeof(double) * XN); double *h_kx = (double*)malloc(XN * sizeof(double)); float *h_time = (float*)malloc(sizeof(float) * TN/IRVL); cufftDoubleComplex *h_psi = (cufftDoubleComplex*) malloc(sizeof(cufftDoubleComplex)*XN); cufftDoubleComplex *h_psi_0 = (cufftDoubleComplex*) malloc(sizeof(cufftDoubleComplex)*XN); // Create transform plans cufftHandle plan; CUFFT_SAFE_CALL(cufftPlan1d(&plan, XN, CUFFT_Z2Z, 1)); // Create wave number double dkx = 2*M_PI/XN/DX; for(int i = XN/2; i >= 0; i--) h_kx[XN/2 - i]=(XN/2 - i) * dkx; for(int i = XN/2+1; i < XN; i++) h_kx[i]=(i - XN) * dkx; // Initial conditions on host for(int i = 0; i < XN; i++) { h_x[i] = (i-XN/2)*DX; h_psi[i].x = sqrt(2)/cosh(h_x[i]); //h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0)); h_psi[i].y = 0; h_psi_0[i].x = h_psi[i].x; h_psi_0[i].y = h_psi[i].y; h_k2[i] = h_kx[i]*h_kx[i]; } // Allocate device arrays and copy from host cufftDoubleComplex *d_psi; double *d_k2; CUDAR_SAFE_CALL(cudaMalloc(&d_psi, sizeof(cufftDoubleComplex)*XN)); CUDAR_SAFE_CALL(cudaMalloc(&d_k2, sizeof(double)*XN)); CUDAR_SAFE_CALL(cudaMemcpy(d_psi, h_psi, sizeof(cufftDoubleComplex)*XN, cudaMemcpyHostToDevice)); CUDAR_SAFE_CALL(cudaMemcpy(d_k2, h_k2, sizeof(double)*XN, cudaMemcpyHostToDevice)); // Initialize the grid dim3 threadsPerBlock(128,1,1); dim3 blocksPerGrid((XN + 127)/128,1,1); // Forward transform CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_FORWARD)); // Timing starts here float time_value; cudaEventRecord(begin_event, 0); // Start time evolution for (int i = 1; i <= TN; i++) { // Solve linear part lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Backward transform CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_INVERSE)); // Normalize the transform normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Solve nonlinear part nonlin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, DT, XN); #if CUDAR_ERROR_CHECKING 
CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Forward transform CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_FORWARD)); // Solve linear part lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Print time at specific intervals if(i % IRVL == 0) { cudaEventRecord(end_event, 0); cudaEventSynchronize(end_event); cudaEventElapsedTime(&time_value, begin_event, end_event); h_time[i/IRVL-1] = time_value; } } // Plot timing results print_time(h_time, TN, IRVL, TIME_F); // Backward tranform to retreive data CUFFT_SAFE_CALL(cufftExecZ2Z(plan, d_psi, d_psi, CUFFT_INVERSE)); // Normalize the transform normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN); #if CUDAR_ERROR_CHECKING CUDAR_SAFE_CALL(cudaPeekAtLastError()); #endif // CUDAR_ERROR_CHECKING // Copy results to device CUDAR_SAFE_CALL(cudaMemcpy(h_psi, d_psi, sizeof(cufftDoubleComplex)*XN, cudaMemcpyDeviceToHost)); // Plot results cm_plot_1d(h_psi_0, h_psi, LX, XN, PLOT_F); // Clean up CUFFT_SAFE_CALL(cufftDestroy(plan)); free(h_x); free(h_k2); free(h_kx); free(h_psi_0); free(h_psi); free(h_time); CUDAR_SAFE_CALL(cudaFree(d_psi)); CUDAR_SAFE_CALL(cudaFree(d_k2)); return 0; } __global__ void nonlin(cufftDoubleComplex *psi, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) (needs fixing) //if (i >= xn - 1 || i == 0) return; if (i >= xn) return; double psi2 = cuCabs(psi[i])*cuCabs(psi[i]); psi[i] = cuCmul(psi[i], make_cuDoubleComplex(cos(psi2*dt), sin(psi2*dt))); } __global__ void lin(cufftDoubleComplex *psi, double *k2, double dt, int xn) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Avoid first and last point (boundary conditions) (needs fixing) //if (i >= xn - 1 || i == 0) return; if (i >= xn) return; psi[i] = cuCmul(psi[i], make_cuDoubleComplex(cos(k2[i]*dt), -sin(k2[i]*dt))); } __global__ void normalize(cufftDoubleComplex *psi, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; // Stay within range since grid might be larger if (i >= size) return; psi[i].x = psi[i].x/size; psi[i].y = psi[i].y/size; }
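The time loop in both versions of this split-step solver appears to implement second-order Strang splitting, which the lin, nonlin, and normalize kernels make explicit:

\psi^{n+1} = e^{\mathcal{L}\,\Delta t/2}\; e^{\mathcal{N}\,\Delta t}\; e^{\mathcal{L}\,\Delta t/2}\;\psi^{n}

where the linear half-step acts in Fourier space, \hat\psi_k \mapsto e^{-i k^2 \Delta t/2}\,\hat\psi_k (the lin kernel multiplies by \cos(k^2 \Delta t) - i\sin(k^2 \Delta t) with \Delta t already halved by the caller), and the nonlinear full step acts pointwise in physical space, \psi_j \mapsto e^{+i|\psi_j|^2 \Delta t}\,\psi_j (the nonlin kernel). Because the backward FFT in cuFFT/hipFFT is unnormalized, the normalize kernel divides every sample by the transform length XN after each inverse transform.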
b57818618bff65b5411d8485867744759625f2fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel1_ydir; int xdim0_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel1_ydir; int ydim0_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel1_ydir; int xdim1_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel1_ydir; int ydim1_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel1_ydir; int xdim2_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel1_ydir; int ydim2_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel1_ydir; int xdim3_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel1_ydir; int ydim3_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel1_ydir; int xdim4_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel1_ydir; int ydim4_advec_cell_kernel1_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel1_ydir*(y)+xdim0_advec_cell_kernel1_ydir*ydim0_advec_cell_kernel1_ydir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel1_ydir*(y)+xdim1_advec_cell_kernel1_ydir*ydim1_advec_cell_kernel1_ydir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel1_ydir*(y)+xdim2_advec_cell_kernel1_ydir*ydim2_advec_cell_kernel1_ydir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel1_ydir*(y)+xdim3_advec_cell_kernel1_ydir*ydim3_advec_cell_kernel1_ydir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel1_ydir*(y)+xdim4_advec_cell_kernel1_ydir*ydim4_advec_cell_kernel1_ydir*(z)) //user function __device__ inline void advec_cell_kernel1_ydir_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_z, const double *vol_flux_y) { pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)] + vol_flux_z[OPS_ACC3(0,0,1)] - vol_flux_z[OPS_ACC3(0,0,0)]; post_vol[OPS_ACC1(0,0,0)] = pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_cell_kernel1_ydir( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel1_ydir * ydim0_advec_cell_kernel1_ydir; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel1_ydir * ydim1_advec_cell_kernel1_ydir; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel1_ydir * ydim2_advec_cell_kernel1_ydir; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel1_ydir * ydim3_advec_cell_kernel1_ydir; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel1_ydir * ydim4_advec_cell_kernel1_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel1_ydir_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void 
ops_par_loop_advec_cell_kernel1_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_cell_kernel1_ydir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,113)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(113,"advec_cell_kernel1_ydir"); OPS_kernels[113].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel1_ydir_h || ydim0 != ydim0_advec_cell_kernel1_ydir_h || xdim1 != xdim1_advec_cell_kernel1_ydir_h || ydim1 != ydim1_advec_cell_kernel1_ydir_h || xdim2 != xdim2_advec_cell_kernel1_ydir_h || ydim2 != ydim2_advec_cell_kernel1_ydir_h || xdim3 != xdim3_advec_cell_kernel1_ydir_h || ydim3 != ydim3_advec_cell_kernel1_ydir_h || xdim4 != xdim4_advec_cell_kernel1_ydir_h || ydim4 != ydim4_advec_cell_kernel1_ydir_h) { hipMemcpyToSymbol( xdim0_advec_cell_kernel1_ydir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel1_ydir_h = xdim0; hipMemcpyToSymbol( ydim0_advec_cell_kernel1_ydir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel1_ydir_h = ydim0; hipMemcpyToSymbol( xdim1_advec_cell_kernel1_ydir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel1_ydir_h = xdim1; hipMemcpyToSymbol( ydim1_advec_cell_kernel1_ydir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel1_ydir_h = ydim1; hipMemcpyToSymbol( xdim2_advec_cell_kernel1_ydir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel1_ydir_h = xdim2; hipMemcpyToSymbol( ydim2_advec_cell_kernel1_ydir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel1_ydir_h = ydim2; hipMemcpyToSymbol( xdim3_advec_cell_kernel1_ydir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel1_ydir_h = xdim3; hipMemcpyToSymbol( ydim3_advec_cell_kernel1_ydir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel1_ydir_h = ydim3; hipMemcpyToSymbol( xdim4_advec_cell_kernel1_ydir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel1_ydir_h = xdim4; hipMemcpyToSymbol( ydim4_advec_cell_kernel1_ydir, &ydim4, sizeof(int) 
); ydim4_advec_cell_kernel1_ydir_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[113].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_cell_kernel1_ydir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[113].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[113].mpi_time += t2-t1; OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void 
ops_par_loop_advec_cell_kernel1_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 113; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 113; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_cell_kernel1_ydir_execute; if (OPS_diags > 1) { ops_timing_realloc(113,"advec_cell_kernel1_ydir"); } ops_enqueue_kernel(desc); } #endif
b57818618bff65b5411d8485867744759625f2fc.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel1_ydir; int xdim0_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel1_ydir; int ydim0_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel1_ydir; int xdim1_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel1_ydir; int ydim1_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel1_ydir; int xdim2_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel1_ydir; int ydim2_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel1_ydir; int xdim3_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel1_ydir; int ydim3_advec_cell_kernel1_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel1_ydir; int xdim4_advec_cell_kernel1_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel1_ydir; int ydim4_advec_cell_kernel1_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel1_ydir*(y)+xdim0_advec_cell_kernel1_ydir*ydim0_advec_cell_kernel1_ydir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel1_ydir*(y)+xdim1_advec_cell_kernel1_ydir*ydim1_advec_cell_kernel1_ydir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel1_ydir*(y)+xdim2_advec_cell_kernel1_ydir*ydim2_advec_cell_kernel1_ydir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel1_ydir*(y)+xdim3_advec_cell_kernel1_ydir*ydim3_advec_cell_kernel1_ydir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel1_ydir*(y)+xdim4_advec_cell_kernel1_ydir*ydim4_advec_cell_kernel1_ydir*(z)) //user function __device__ inline void advec_cell_kernel1_ydir_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_z, const double *vol_flux_y) { pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)] + vol_flux_z[OPS_ACC3(0,0,1)] - vol_flux_z[OPS_ACC3(0,0,0)]; post_vol[OPS_ACC1(0,0,0)] = pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_cell_kernel1_ydir( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel1_ydir * ydim0_advec_cell_kernel1_ydir; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel1_ydir * ydim1_advec_cell_kernel1_ydir; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel1_ydir * ydim2_advec_cell_kernel1_ydir; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel1_ydir * ydim3_advec_cell_kernel1_ydir; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel1_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel1_ydir * ydim4_advec_cell_kernel1_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel1_ydir_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_cell_kernel1_ydir(char const *name, ops_block block, int dim, int* range, ops_arg 
arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_cell_kernel1_ydir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,113)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(113,"advec_cell_kernel1_ydir"); OPS_kernels[113].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel1_ydir_h || ydim0 != ydim0_advec_cell_kernel1_ydir_h || xdim1 != xdim1_advec_cell_kernel1_ydir_h || ydim1 != ydim1_advec_cell_kernel1_ydir_h || xdim2 != xdim2_advec_cell_kernel1_ydir_h || ydim2 != ydim2_advec_cell_kernel1_ydir_h || xdim3 != xdim3_advec_cell_kernel1_ydir_h || ydim3 != ydim3_advec_cell_kernel1_ydir_h || xdim4 != xdim4_advec_cell_kernel1_ydir_h || ydim4 != ydim4_advec_cell_kernel1_ydir_h) { cudaMemcpyToSymbol( xdim0_advec_cell_kernel1_ydir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel1_ydir_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_cell_kernel1_ydir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel1_ydir_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_cell_kernel1_ydir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel1_ydir_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_cell_kernel1_ydir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel1_ydir_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_cell_kernel1_ydir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel1_ydir_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_cell_kernel1_ydir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel1_ydir_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_cell_kernel1_ydir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel1_ydir_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_cell_kernel1_ydir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel1_ydir_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_cell_kernel1_ydir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel1_ydir_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_cell_kernel1_ydir, &ydim4, sizeof(int) ); ydim4_advec_cell_kernel1_ydir_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 
(y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[113].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_cell_kernel1_ydir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[113].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[113].mpi_time += t2-t1; OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[113].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_cell_kernel1_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg 
arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 113; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 113; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_cell_kernel1_ydir_execute; if (OPS_diags > 1) { ops_timing_realloc(113,"advec_cell_kernel1_ydir"); } ops_enqueue_kernel(desc); } #endif
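/*
 * Usage sketch for the generated host stub above (ops.py output): application code reaches
 * ops_par_loop_advec_cell_kernel1_ydir through plain OPS argument descriptors.  The wrapper
 * function, the ops_dat / ops_stencil handles and the loop-bounds array below are hypothetical
 * placeholders, and the access modes are only inferred from the kernel body (pre_vol and
 * post_vol written, volume and the fluxes read); only the stub signature and the
 * ops_arg_dat / OPS_READ / OPS_WRITE API come from the listing and the OPS library.
 */
static void advec_cell_sweep_ydir_sketch(ops_block grid, int *rangexyz,
                                         ops_dat pre_vol, ops_dat post_vol, ops_dat volume,
                                         ops_dat vol_flux_z, ops_dat vol_flux_y,
                                         ops_stencil sten_000,      /* point (0,0,0)            */
                                         ops_stencil sten_000_00p1, /* points (0,0,0),(0,0,1)   */
                                         ops_stencil sten_000_0p10) /* points (0,0,0),(0,1,0)   */
{
  /* one y-direction advection sweep over the 3D range rangexyz = {x0,x1,y0,y1,z0,z1} */
  ops_par_loop_advec_cell_kernel1_ydir("advec_cell_kernel1_ydir", grid, 3, rangexyz,
      ops_arg_dat(pre_vol,    1, sten_000,      "double", OPS_WRITE),
      ops_arg_dat(post_vol,   1, sten_000,      "double", OPS_WRITE),
      ops_arg_dat(volume,     1, sten_000,      "double", OPS_READ),
      ops_arg_dat(vol_flux_z, 1, sten_000_00p1, "double", OPS_READ),
      ops_arg_dat(vol_flux_y, 1, sten_000_0p10, "double", OPS_READ));
}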
2625ec2986a581788abee531bc36a02a63bb4865.hip
// !!! This is a file automatically generated by hipify!!! /* icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3 */ #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <unistd.h> #include <sys/time.h> #include <iostream> #include <chrono> #include <iomanip> #ifndef nevts #define nevts 100 #endif #ifndef bsize #define bsize 128 #endif #ifndef ntrks #define ntrks 9600 //122880 #endif #define nb ntrks/bsize #define smear 0.1 #ifndef NITER #define NITER 5 #endif #ifndef nlayer #define nlayer 20 #endif #ifndef num_streams #define num_streams 7 //streams changes answers #endif #ifndef threadsperblockx #define threadsperblockx 32 #endif #define threadsperblocky 512/threadsperblockx //#define threadsperblocky 1024/threadsperblockx //unclear why bit 1024 total threads per block gives resource error when running with more than one layer #ifndef blockspergrid #define blockspergrid 15 #endif #define HOSTDEV __host__ __device__ HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) { return i*D+j; } HOSTDEV size_t SymOffsets33(size_t i) { const size_t offs[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5}; return offs[i]; } HOSTDEV size_t SymOffsets66(size_t i) { const size_t offs[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20}; return offs[i]; } struct ATRK { float par[6]; float cov[21]; int q; // int hitidx[22]; }; struct AHIT { float pos[3]; float cov[6]; }; struct MP1I { int data[1*bsize]; }; struct MP22I { int data[22*bsize]; }; struct MP3F { float data[3*bsize]; }; struct MP6F { float data[6*bsize]; }; struct MP3x3 { float data[9*bsize]; }; struct MP3x6 { float data[18*bsize]; }; struct MP3x3SF { float data[6*bsize]; }; struct MP6x6SF { float data[21*bsize]; }; struct MP6x6F { float data[36*bsize]; }; struct MPTRK { MP6F par; MP6x6SF cov; MP1I q; // MP22I hitidx; }; struct MPHIT { MP3F pos; MP3x3SF cov; }; float randn(float mu, float sigma) { float U1, U2, W, mult; static float X1, X2; static int call = 0; if (call == 1) { call = !call; return (mu + sigma * (float) X2); } do { U1 = -1 + ((float) rand () / RAND_MAX) * 2; U2 = -1 + ((float) rand () / RAND_MAX) * 2; W = pow (U1, 2) + pow (U2, 2); } while (W >= 1 || W == 0); mult = sqrt ((-2 * log (W)) / W); X1 = U1 * mult; X2 = U2 * mult; call = !call; return (mu + sigma * (float) X1); } MPTRK* prepareTracks(ATRK inputtrk) { MPTRK* result; hipMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK)); //fixme, align? hipMemAdvise(result,nevts*nb*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,hipCpuDeviceId); for (size_t ie=0;ie<nevts;++ie) { for (size_t ib=0;ib<nb;++ib) { for (size_t it=0;it<bsize;++it) { //par for (size_t ip=0;ip<6;++ip) { result[ib + nb*ie].par.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip]; } //cov for (size_t ip=0;ip<21;++ip) { result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip]; } //q result[ib + nb*ie].q.data[it] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check } } } return result; } MPHIT* prepareHits(AHIT inputhit) { //MPHIT* result = (MPHIT*) malloc(nevts*nb*sizeof(MPHIT)); MPHIT* result; hipMallocManaged((void**)&result,nlayer*nevts*nb*sizeof(MPHIT)); //fixme, align? 
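  // Note: result is managed (unified) memory, so the same pointer is used on the host here and
  // inside the GPU kernels later.  The hipMemAdvise call on the next line marks the CPU as the
  // preferred location while the hits are filled in on the host; main() later flips the preference
  // to the GPU per stream chunk and issues hipMemPrefetchAsync before launching GPUsequence.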
hipMemAdvise(result,nlayer*nevts*nb*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,hipCpuDeviceId); for (int lay=0;lay<nlayer;++lay) { for (size_t ie=0;ie<nevts;++ie) { for (size_t ib=0;ib<nb;++ib) { for (size_t it=0;it<bsize;++it) { //pos for (size_t ip=0;ip<3;++ip) { result[lay+nlayer*(ib + nb*ie)].pos.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip]; } //cov for (size_t ip=0;ip<6;++ip) { result[lay+nlayer*(ib + nb*ie)].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip]; } } } } } return result; } HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib,int layer) { return &(tracks[ib + nb*ev+layer*nevts]); } HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) { return &(tracks[ib + nb*ev]); } HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib,int layer) { return &(tracks[ib + nb*ev+layer*nevts]); } HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) { return &(tracks[ib + nb*ev]); } HOSTDEV float q(const MP1I* bq, size_t it){ return (*bq).data[it]; } HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){ return (*bpars).data[it + ipar*bsize]; } HOSTDEV float x (const MP6F* bpars, size_t it){ return par(bpars, it, 0); } HOSTDEV float y (const MP6F* bpars, size_t it){ return par(bpars, it, 1); } HOSTDEV float z (const MP6F* bpars, size_t it){ return par(bpars, it, 2); } HOSTDEV float ipt (const MP6F* bpars, size_t it){ return par(bpars, it, 3); } HOSTDEV float phi (const MP6F* bpars, size_t it){ return par(bpars, it, 4); } HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); } HOSTDEV float x (MP6F* bpars, size_t it){ return par(bpars, it, 0); } HOSTDEV float y (MP6F* bpars, size_t it){ return par(bpars, it, 1); } HOSTDEV float z (MP6F* bpars, size_t it){ return par(bpars, it, 2); } HOSTDEV float ipt (MP6F* bpars, size_t it){ return par(bpars, it, 3); } HOSTDEV float phi (MP6F* bpars, size_t it){ return par(bpars, it, 4); } HOSTDEV float theta(MP6F* bpars, size_t it){ return par(bpars, it, 5); } HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){ return par(&(*btracks).par,it,ipar); } HOSTDEV float x (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); } HOSTDEV float y (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); } HOSTDEV float z (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); } HOSTDEV float ipt (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); } HOSTDEV float phi (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); } HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); } HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){ size_t ib = tk/bsize; const MPTRK* btracks = bTk(tracks, ev, ib); size_t it = tk % bsize; return par(btracks, it, ipar); } HOSTDEV float x (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); } HOSTDEV float y (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); } HOSTDEV float z (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); } HOSTDEV float ipt (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); } HOSTDEV float phi (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); } HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); } HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){ (*bpars).data[it + ipar*bsize] = val; } HOSTDEV void setx (MP6F* bpars, size_t it, float val){ 
return setpar(bpars, it, 0, val); } HOSTDEV void sety (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 1, val); } HOSTDEV void setz (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 2, val); } HOSTDEV void setipt (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 3, val); } HOSTDEV void setphi (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 4, val); } HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 5, val); } HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){ return setpar(&(*btracks).par,it,ipar,val); } HOSTDEV void setx (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 0, val); } HOSTDEV void sety (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 1, val); } HOSTDEV void setz (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 2, val); } HOSTDEV void setipt (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 3, val); } HOSTDEV void setphi (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 4, val); } HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 5, val); } HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) { return &(hits[ib + nb*ev]); } HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) { return &(hits[ib + nb*ev]); } HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib,int lay) { return &(hits[lay + (ib*nlayer) +(ev*nlayer*nb)]); } HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){ return (*hpos).data[it + ipar*bsize]; } HOSTDEV float x(const MP3F* hpos, size_t it) { return pos(hpos, it, 0); } HOSTDEV float y(const MP3F* hpos, size_t it) { return pos(hpos, it, 1); } HOSTDEV float z(const MP3F* hpos, size_t it) { return pos(hpos, it, 2); } HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){ return pos(&(*hits).pos,it,ipar); } HOSTDEV float x(const MPHIT* hits, size_t it) { return pos(hits, it, 0); } HOSTDEV float y(const MPHIT* hits, size_t it) { return pos(hits, it, 1); } HOSTDEV float z(const MPHIT* hits, size_t it) { return pos(hits, it, 2); } HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){ size_t ib = tk/bsize; const MPHIT* bhits = bHit(hits, ev, ib); size_t it = tk % bsize; return pos(bhits,it,ipar); } HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 0); } HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 1); } HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 2); } #define N bsize __forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) { const float* a = A->data; //ASSUME_ALIGNED(a, 64); const float* b = B->data; //ASSUME_ALIGNED(b, 64); float* c = C->data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = b[ 0*N+n] + a[ 2*N+n]*b[ 3*N+n] + a[ 3*N+n]*b[ 6*N+n] + a[ 4*N+n]*b[10*N+n] + a[ 5*N+n]*b[15*N+n]; c[ 1*N+n] = b[ 1*N+n] + a[ 2*N+n]*b[ 4*N+n] + a[ 3*N+n]*b[ 7*N+n] + a[ 4*N+n]*b[11*N+n] + a[ 5*N+n]*b[16*N+n]; c[ 2*N+n] = b[ 3*N+n] + a[ 2*N+n]*b[ 5*N+n] + a[ 3*N+n]*b[ 8*N+n] + a[ 4*N+n]*b[12*N+n] + a[ 5*N+n]*b[17*N+n]; c[ 3*N+n] = b[ 6*N+n] + a[ 2*N+n]*b[ 8*N+n] + a[ 3*N+n]*b[ 9*N+n] + a[ 4*N+n]*b[13*N+n] + a[ 5*N+n]*b[18*N+n]; c[ 4*N+n] = b[10*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[13*N+n] + a[ 4*N+n]*b[14*N+n] + a[ 5*N+n]*b[19*N+n]; c[ 5*N+n] = b[15*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 
4*N+n]*b[19*N+n] + a[ 5*N+n]*b[20*N+n]; c[ 6*N+n] = b[ 1*N+n] + a[ 8*N+n]*b[ 3*N+n] + a[ 9*N+n]*b[ 6*N+n] + a[10*N+n]*b[10*N+n] + a[11*N+n]*b[15*N+n]; c[ 7*N+n] = b[ 2*N+n] + a[ 8*N+n]*b[ 4*N+n] + a[ 9*N+n]*b[ 7*N+n] + a[10*N+n]*b[11*N+n] + a[11*N+n]*b[16*N+n]; c[ 8*N+n] = b[ 4*N+n] + a[ 8*N+n]*b[ 5*N+n] + a[ 9*N+n]*b[ 8*N+n] + a[10*N+n]*b[12*N+n] + a[11*N+n]*b[17*N+n]; c[ 9*N+n] = b[ 7*N+n] + a[ 8*N+n]*b[ 8*N+n] + a[ 9*N+n]*b[ 9*N+n] + a[10*N+n]*b[13*N+n] + a[11*N+n]*b[18*N+n]; c[10*N+n] = b[11*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[13*N+n] + a[10*N+n]*b[14*N+n] + a[11*N+n]*b[19*N+n]; c[11*N+n] = b[16*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[19*N+n] + a[11*N+n]*b[20*N+n]; c[12*N+n] = 0; c[13*N+n] = 0; c[14*N+n] = 0; c[15*N+n] = 0; c[16*N+n] = 0; c[17*N+n] = 0; c[18*N+n] = b[ 6*N+n]; c[19*N+n] = b[ 7*N+n]; c[20*N+n] = b[ 8*N+n]; c[21*N+n] = b[ 9*N+n]; c[22*N+n] = b[13*N+n]; c[23*N+n] = b[18*N+n]; c[24*N+n] = a[26*N+n]*b[ 3*N+n] + a[27*N+n]*b[ 6*N+n] + b[10*N+n] + a[29*N+n]*b[15*N+n]; c[25*N+n] = a[26*N+n]*b[ 4*N+n] + a[27*N+n]*b[ 7*N+n] + b[11*N+n] + a[29*N+n]*b[16*N+n]; c[26*N+n] = a[26*N+n]*b[ 5*N+n] + a[27*N+n]*b[ 8*N+n] + b[12*N+n] + a[29*N+n]*b[17*N+n]; c[27*N+n] = a[26*N+n]*b[ 8*N+n] + a[27*N+n]*b[ 9*N+n] + b[13*N+n] + a[29*N+n]*b[18*N+n]; c[28*N+n] = a[26*N+n]*b[12*N+n] + a[27*N+n]*b[13*N+n] + b[14*N+n] + a[29*N+n]*b[19*N+n]; c[29*N+n] = a[26*N+n]*b[17*N+n] + a[27*N+n]*b[18*N+n] + b[19*N+n] + a[29*N+n]*b[20*N+n]; c[30*N+n] = b[15*N+n]; c[31*N+n] = b[16*N+n]; c[32*N+n] = b[17*N+n]; c[33*N+n] = b[18*N+n]; c[34*N+n] = b[19*N+n]; c[35*N+n] = b[20*N+n]; } } __forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) { const float* a = A->data; //ASSUME_ALIGNED(a, 64); const float* b = B->data; //ASSUME_ALIGNED(b, 64); float* c = C->data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = b[ 0*N+n] + b[ 2*N+n]*a[ 2*N+n] + b[ 3*N+n]*a[ 3*N+n] + b[ 4*N+n]*a[ 4*N+n] + b[ 5*N+n]*a[ 5*N+n]; c[ 1*N+n] = b[ 6*N+n] + b[ 8*N+n]*a[ 2*N+n] + b[ 9*N+n]*a[ 3*N+n] + b[10*N+n]*a[ 4*N+n] + b[11*N+n]*a[ 5*N+n]; c[ 2*N+n] = b[ 7*N+n] + b[ 8*N+n]*a[ 8*N+n] + b[ 9*N+n]*a[ 9*N+n] + b[10*N+n]*a[10*N+n] + b[11*N+n]*a[11*N+n]; c[ 3*N+n] = b[12*N+n] + b[14*N+n]*a[ 2*N+n] + b[15*N+n]*a[ 3*N+n] + b[16*N+n]*a[ 4*N+n] + b[17*N+n]*a[ 5*N+n]; c[ 4*N+n] = b[13*N+n] + b[14*N+n]*a[ 8*N+n] + b[15*N+n]*a[ 9*N+n] + b[16*N+n]*a[10*N+n] + b[17*N+n]*a[11*N+n]; c[ 5*N+n] = 0; c[ 6*N+n] = b[18*N+n] + b[20*N+n]*a[ 2*N+n] + b[21*N+n]*a[ 3*N+n] + b[22*N+n]*a[ 4*N+n] + b[23*N+n]*a[ 5*N+n]; c[ 7*N+n] = b[19*N+n] + b[20*N+n]*a[ 8*N+n] + b[21*N+n]*a[ 9*N+n] + b[22*N+n]*a[10*N+n] + b[23*N+n]*a[11*N+n]; c[ 8*N+n] = 0; c[ 9*N+n] = b[21*N+n]; c[10*N+n] = b[24*N+n] + b[26*N+n]*a[ 2*N+n] + b[27*N+n]*a[ 3*N+n] + b[28*N+n]*a[ 4*N+n] + b[29*N+n]*a[ 5*N+n]; c[11*N+n] = b[25*N+n] + b[26*N+n]*a[ 8*N+n] + b[27*N+n]*a[ 9*N+n] + b[28*N+n]*a[10*N+n] + b[29*N+n]*a[11*N+n]; c[12*N+n] = 0; c[13*N+n] = b[27*N+n]; c[14*N+n] = b[26*N+n]*a[26*N+n] + b[27*N+n]*a[27*N+n] + b[28*N+n] + b[29*N+n]*a[29*N+n]; c[15*N+n] = b[30*N+n] + b[32*N+n]*a[ 2*N+n] + b[33*N+n]*a[ 3*N+n] + b[34*N+n]*a[ 4*N+n] + b[35*N+n]*a[ 5*N+n]; c[16*N+n] = b[31*N+n] + b[32*N+n]*a[ 8*N+n] + b[33*N+n]*a[ 9*N+n] + b[34*N+n]*a[10*N+n] + b[35*N+n]*a[11*N+n]; c[17*N+n] = 0; c[18*N+n] = b[33*N+n]; c[19*N+n] = b[32*N+n]*a[26*N+n] + b[33*N+n]*a[27*N+n] + b[34*N+n] + b[35*N+n]*a[29*N+n]; c[20*N+n] = b[35*N+n]; } } __forceinline__ __device__ void KalmanGainInv(const MP6x6SF* A, const MP3x3SF* B, MP3x3* C) { // k = P 
Ht(HPHt + R)^-1 // HpHt -> cov of x,y,z. take upper 3x3 matrix of P // This calculates the inverse of HpHt +R const float* a = (*A).data; //ASSUME_ALIGNED(a, 64); const float* b = (*B).data; //ASSUME_ALIGNED(b, 64); float* c = (*C).data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { double det = ((a[0*N+n]+b[0*N+n])*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n])))) - ((a[1*N+n]+b[1*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n])))) + ((a[2*N+n]+b[2*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n])))); double invdet = 1.0/det; c[ 0*N+n] = invdet*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 1*N+n] = -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 2*N+n] = invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 3*N+n] = -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n]))); c[ 4*N+n] = invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[2*N+n]+b[2*N+n]))); c[ 5*N+n] = -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n]))); c[ 6*N+n] = invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n]))); c[ 7*N+n] = -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n]))); c[ 8*N+n] = invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[6*N+n]+b[3*N+n])) - ((a[1*N+n]+b[1*N+n]) *(a[1*N+n]+b[1*N+n]))); } // __syncthreads(); } __forceinline__ __device__ void KalmanGain(const MP6x6SF* A, const MP3x3* B, MP3x6* C) { // k = P Ht(HPHt + R)^-1 // HpHt -> cov of x,y,z. 
take upper 3x3 matrix of P // This calculates the kalman gain const float* a = (*A).data; //ASSUME_ALIGNED(a, 64); const float* b = (*B).data; //ASSUME_ALIGNED(b, 64); float* c = (*C).data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = a[0*N+n]*b[0*N+n] + a[1*N+n]*b[3*N+n] + a[2*N+n]*b[6*N+n]; c[ 1*N+n] = a[0*N+n]*b[1*N+n] + a[1*N+n]*b[4*N+n] + a[2*N+n]*b[7*N+n]; c[ 2*N+n] = a[0*N+n]*b[2*N+n] + a[1*N+n]*b[5*N+n] + a[2*N+n]*b[8*N+n]; c[ 3*N+n] = a[1*N+n]*b[0*N+n] + a[6*N+n]*b[3*N+n] + a[7*N+n]*b[6*N+n]; c[ 4*N+n] = a[1*N+n]*b[1*N+n] + a[6*N+n]*b[4*N+n] + a[7*N+n]*b[7*N+n]; c[ 5*N+n] = a[1*N+n]*b[2*N+n] + a[6*N+n]*b[5*N+n] + a[7*N+n]*b[8*N+n]; c[ 6*N+n] = a[2*N+n]*b[0*N+n] + a[7*N+n]*b[3*N+n] + a[11*N+n]*b[6*N+n]; c[ 7*N+n] = a[2*N+n]*b[1*N+n] + a[7*N+n]*b[4*N+n] + a[11*N+n]*b[7*N+n]; c[ 8*N+n] = a[2*N+n]*b[2*N+n] + a[7*N+n]*b[5*N+n] + a[11*N+n]*b[8*N+n]; c[ 9*N+n] = a[3*N+n]*b[0*N+n] + a[8*N+n]*b[3*N+n] + a[12*N+n]*b[6*N+n]; c[ 10*N+n] = a[3*N+n]*b[1*N+n] + a[8*N+n]*b[4*N+n] + a[12*N+n]*b[7*N+n]; c[ 11*N+n] = a[3*N+n]*b[2*N+n] + a[8*N+n]*b[5*N+n] + a[12*N+n]*b[8*N+n]; c[ 12*N+n] = a[4*N+n]*b[0*N+n] + a[9*N+n]*b[3*N+n] + a[13*N+n]*b[6*N+n]; c[ 13*N+n] = a[4*N+n]*b[1*N+n] + a[9*N+n]*b[4*N+n] + a[13*N+n]*b[7*N+n]; c[ 14*N+n] = a[4*N+n]*b[2*N+n] + a[9*N+n]*b[5*N+n] + a[13*N+n]*b[8*N+n]; c[ 15*N+n] = a[5*N+n]*b[0*N+n] + a[10*N+n]*b[3*N+n] + a[14*N+n]*b[6*N+n]; c[ 16*N+n] = a[5*N+n]*b[1*N+n] + a[10*N+n]*b[4*N+n] + a[14*N+n]*b[7*N+n]; c[ 17*N+n] = a[5*N+n]*b[2*N+n] + a[10*N+n]*b[5*N+n] + a[14*N+n]*b[8*N+n]; } //__syncthreads(); } __forceinline__ __device__ void KalmanUpdate(MP6x6SF* trkErr, MP6F* inPar, const MP3x3SF* hitErr, const MP3F* msP){//, MP3x3* inverse_temp, MP3x6* kGain, MP6x6SF* newErr){ MP3x3 inverse_temp; MP3x6 kGain; MP6x6SF newErr; //MP6F newPar; KalmanGainInv(trkErr,hitErr,&inverse_temp); //__syncthreads(); KalmanGain(trkErr,&inverse_temp,&kGain); //__syncthreads(); for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){ float xin = x(inPar,it); float yin = y(inPar,it); float zin = z(inPar,it); float ptin = 1./ipt(inPar,it); float phiin = phi(inPar,it); float thetain = theta(inPar,it); float xout = x(msP,it); float yout = y(msP,it); float zout = z(msP,it); float ydiff = y(msP,it) - y(inPar,it); float xnew = xin + (kGain.data[0*bsize+it]*(xout-xin)) +(kGain.data[1*bsize+it]*(yout-yin)); // removed "zout-zin" term since zin is set to zout thus the term is 0 anyway. 
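     // Together with KalmanGainInv/KalmanGain above, the statements in this loop are the standard
     // Kalman measurement update with H picking out the (x,y,z) track components:
     //   K  = P H^T (H P H^T + R)^-1   -> kGain (6x3, stored as MP3x6)
     //   x' = x + K (m - H x)          -> xnew ... thetanew
     //   P' = P - K H P                -> newErr
     // where P = trkErr, R = hitErr and m = msP (the measured hit position).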
float ynew = yin + (kGain.data[3*bsize+it]*(xout-xin)) +(kGain.data[4*bsize+it]*(yout-yin)); float znew = zin + (kGain.data[6*bsize+it]*(xout-xin)) +(kGain.data[7*bsize+it]*(yout-yin)); float ptnew = ptin + (kGain.data[9*bsize+it]*(xout-xin)) +(kGain.data[10*bsize+it]*(yout-yin)); float phinew = phiin + (kGain.data[12*bsize+it]*(xout-xin)) +(kGain.data[13*bsize+it]*(yout-yin)); float thetanew = thetain + (kGain.data[15*bsize+it]*(xout-xin)) +(kGain.data[16*bsize+it]*(yout-yin)); newErr.data[0*bsize+it] = trkErr->data[0*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[0*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[2*bsize+it]); newErr.data[1*bsize+it] = trkErr->data[1*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[7*bsize+it]); newErr.data[2*bsize+it] = trkErr->data[2*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[3*bsize+it] = trkErr->data[3*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[4*bsize+it] = trkErr->data[4*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[5*bsize+it] = trkErr->data[5*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[6*bsize+it] = trkErr->data[6*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[7*bsize+it]); newErr.data[7*bsize+it] = trkErr->data[7*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[8*bsize+it] = trkErr->data[8*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[9*bsize+it] = trkErr->data[9*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[10*bsize+it] = trkErr->data[10*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[11*bsize+it] = trkErr->data[11*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[12*bsize+it] = trkErr->data[12*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[13*bsize+it] = trkErr->data[13*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[14*bsize+it] = trkErr->data[14*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[15*bsize+it] = 
trkErr->data[15*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[16*bsize+it] = trkErr->data[16*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[17*bsize+it] = trkErr->data[17*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[18*bsize+it] = trkErr->data[18*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[19*bsize+it] = trkErr->data[19*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[20*bsize+it] = trkErr->data[20*bsize+it] - (kGain.data[15*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[16*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[17*bsize+it]*trkErr->data[14*bsize+it]); setx(inPar,it,xnew ); sety(inPar,it,ynew ); setz(inPar,it,znew); setipt(inPar,it, ptnew); setphi(inPar,it, phinew); settheta(inPar,it, thetanew); } //__syncthreads(); trkErr = &newErr; } __device__ __constant__ float kfact = 100/3.8; __device__ __forceinline__ void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar, const MP1I* inChg,const MP3F* msP, MP6x6SF* outErr, MP6F* outPar, struct MP6x6F* errorProp, struct MP6x6F* temp,const MP3x3SF* hitErr) { for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){ const float zout = z(msP,it); const float k = q(inChg,it)*kfact;//100/3.8; const float deltaZ = zout - z(inPar,it); const float pt = 1./ipt(inPar,it); const float cosP = cosf(phi(inPar,it)); const float sinP = sinf(phi(inPar,it)); const float cosT = cosf(theta(inPar,it)); const float sinT = sinf(theta(inPar,it)); const float pxin = cosP*pt; const float pyin = sinP*pt; const float icosT = 1.0/cosT; const float icosTk = icosT/k; const float alpha = deltaZ*sinT*ipt(inPar,it)*icosTk;///(cosT*k); const float sina = sinf(alpha); // this can be approximated; const float cosa = cosf(alpha); // this can be approximated; setx(outPar,it, x(inPar,it) + k*(pxin*sina - pyin*(1.-cosa)) ); sety(outPar,it, y(inPar,it) + k*(pyin*sina + pxin*(1.-cosa)) ); setz(outPar,it,zout); setipt(outPar,it, ipt(inPar,it)); setphi(outPar,it, phi(inPar,it)+alpha ); settheta(outPar,it, theta(inPar,it) ); const float sCosPsina = sinf(cosP*sina); const float cCosPsina = cosf(cosP*sina); for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.; errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)*icosT; errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)*(icosT*pt)-k*(cosP*sina-sinP*(1.-cCosPsina))*(pt*pt); errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k*pt)*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina)); errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)*(icosT*icosT); errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)*icosT; errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*pt)-k*(sinP*sina+cosP*(1.-cCosPsina))*(pt*pt); errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k*pt)*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina); 
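    // The errorProp assignments in this loop build the analytic Jacobian of the helix propagation
    // to the hit z plane; only the entries that MultHelixPropEndcap / MultHelixPropTranspEndcap
    // actually read are written.  After the loop the covariance is transported as
    //   outErr = errorProp * inErr * errorProp^T
    // by those two calls.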
errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*icosT); errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT*(icosTk); errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ*(icosTk); errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ*(icosT*icosTk); // for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.; // errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)/cosT; // errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*ipt(inPar,it))-k*(cosP*sina-sinP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it)); // errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k/ipt(inPar,it))*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina)); // errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*cosT); // errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)/cosT; // errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*ipt(inPar,it))-k*(sinP*sina+cosP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it)); // errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k/ipt(inPar,it))*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina); // errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*cosT); // errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT/(cosT*k); // errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ/(cosT*k); // errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ/(cosT*cosT*k); } //__syncthreads(); MultHelixPropEndcap(errorProp, inErr, temp); //__syncthreads(); MultHelixPropTranspEndcap(errorProp, temp, outErr); } __global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){ int ie_range; if(stream == num_streams){ ie_range = (int)(nevts%num_streams);} else{ie_range = (int)(nevts/num_streams);} for (size_t ie = blockIdx.x; ie<ie_range; ie+=gridDim.x){ for(size_t ib = threadIdx.y; ib <nb; ib+=blockDim.y){ const MPTRK* btracks = bTk(trk,ie,ib); MPTRK* obtracks = bTk(outtrk,ie,ib); for(int layer=0;layer<nlayer;++layer){ const MPHIT* bhits = bHit(hit,ie,ib,layer); /*__shared__*/ struct MP6x6F errorProp, temp; // using shared here causes a race hazard. idk why i did it this way, might be to include shared. 
maybe move to inside p2z function propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos, &(*obtracks).cov, &(*obtracks).par, &errorProp, &temp,&(*bhits).cov); KalmanUpdate(&(*obtracks).cov,&(*obtracks).par,&(*bhits).cov,&(*bhits).pos); } } } } int main (int argc, char* argv[]) { printf("RUNNING CUDA!!\n"); printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky); ATRK inputtrk = { {-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975}, {6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06, 6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627, 0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348}, 1 }; AHIT inputhit = { {-20.7824649810791, -12.24150276184082, 57.8067626953125}, {2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945} }; printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]); printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))], inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))], inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]); printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]); printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear); printf("NITER=%d\n", NITER); long setup_start, setup_stop; struct timeval timecheck; gettimeofday(&timecheck, NULL); setup_start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; MPTRK* trk = prepareTracks(inputtrk); MPHIT* hit = prepareHits(inputhit); MPTRK* outtrk; hipMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK)); dim3 grid(blockspergrid,1,1); dim3 block(threadsperblockx,threadsperblocky,1); int device = -1; hipGetDevice(&device); int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK); int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK); int stream_range; if (stream_remainder == 0){ stream_range =num_streams;} else{stream_range = num_streams+1;} hipStream_t streams[stream_range]; for (int s = 0; s<stream_range;s++){ hipStreamCreate(&streams[s]); //hipStreamCreateWithFlags(&streams[s],hipStreamNonBlocking); } gettimeofday(&timecheck, NULL); setup_stop = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; printf("done preparing!\n"); printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK)); printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK)); printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT)); auto wall_start = std::chrono::high_resolution_clock::now(); for(int itr=0; itr<NITER; itr++){ for (int s = 0; s<num_streams;s++){ hipMemPrefetchAsync(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK), device,streams[s]); hipMemPrefetchAsync(hit+(s*stream_chunk*nlayer),nlayer*stream_chunk*sizeof(MPHIT), device,streams[s]); hipMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,device); hipMemAdvise(hit+(s*stream_chunk*nlayer),nlayer*stream_chunk*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,device); //} 
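      // Each stream owns a contiguous chunk of stream_chunk = (nevts/num_streams)*nb MPTRK blocks
      // (and nlayer times as many MPHIT blocks); the prefetch/advise calls above, the GPUsequence
      // launches below and the prefetch of outtrk back to hipCpuDeviceId all operate on these
      // disjoint chunks, with any leftover events handled through streams[num_streams] when
      // stream_remainder != 0.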
//hipStreamAttachMemAsync(streams[s],trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),hipMemAttachHost); //hipStreamAttachMemAsync(streams[s],hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),hipMemAttachHost); //hipMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),hipMemAdviseSetReadMostly,device); //hipMemAdvise(hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),hipMemAdviseSetReadMostly,device); //hipMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),hipMemAdviseSetAccessedBy,device); //hipMemAdvise(hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),hipMemAdviseSetAccessedBy,device); } if(stream_remainder != 0){ hipMemPrefetchAsync(trk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK), device,streams[num_streams]); hipMemAdvise(trk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,device); hipMemPrefetchAsync(hit+(num_streams*stream_chunk*nlayer),nlayer*stream_remainder*sizeof(MPHIT), device,streams[num_streams]); hipMemAdvise(hit+(num_streams*stream_chunk*nlayer),nlayer*stream_remainder*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,device); } // hipMemAdvise(trk,nevts*nb*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,device); // hipMemAdvise(hit,nevts*nb*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,device); // hipMemAdvise(trk,nevts*nb*sizeof(MPTRK),hipMemAdviseSetReadMostly,device); // hipMemAdvise(hit,nevts*nb*sizeof(MPHIT),hipMemAdviseSetReadMostly,device); for (int s = 0; s<num_streams;s++){ hipLaunchKernelGGL(( GPUsequence), dim3(grid),dim3(block),0,streams[s], trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s); } if(stream_remainder != 0){ hipLaunchKernelGGL(( GPUsequence), dim3(grid),dim3(block),0,streams[num_streams], trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams); } //hipDeviceSynchronize(); // Normal sync for (int s = 0; s<num_streams;s++){ hipMemPrefetchAsync(outtrk+(s*stream_chunk),stream_chunk*sizeof(MPTRK), hipCpuDeviceId,streams[s]); } if(stream_remainder != 0){ hipMemPrefetchAsync(outtrk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK), hipCpuDeviceId,streams[num_streams]); } } //end itr loop hipDeviceSynchronize(); // shaves a few seconds auto wall_stop = std::chrono::high_resolution_clock::now(); for (int s = 0; s<stream_range;s++){ hipStreamDestroy(streams[s]); } auto wall_diff = wall_stop - wall_start; auto wall_time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(wall_diff).count()) / 1e6; printf("setup time time=%f (s)\n", (setup_stop-setup_start)*0.001); printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), wall_time, wall_time/(nevts*ntrks*int(NITER))); printf("formatted %i %i %i %i %i %f 0 %f %i\n",int(NITER),nevts, ntrks, bsize, nb, wall_time, (setup_stop-setup_start)*0.001, num_streams); float avgx = 0, avgy = 0, avgz = 0; float avgpt = 0, avgphi = 0, avgtheta = 0; float avgdx = 0, avgdy = 0, avgdz = 0; for (size_t ie=0;ie<nevts;++ie) { for (size_t it=0;it<ntrks;++it) { float x_ = x(outtrk,ie,it); float y_ = y(outtrk,ie,it); float z_ = z(outtrk,ie,it); float pt_ = 1./ipt(outtrk,ie,it); float phi_ = phi(outtrk,ie,it); float theta_ = theta(outtrk,ie,it); avgpt += pt_; avgphi += phi_; avgtheta += theta_; avgx += x_; avgy += y_; avgz += z_; float hx_ = x(hit,ie,it); float hy_ = y(hit,ie,it); float hz_ = z(hit,ie,it); avgdx += (x_-hx_)/x_; avgdy += (y_-hy_)/y_; avgdz += (z_-hz_)/z_; } } avgpt = avgpt/float(nevts*ntrks); avgphi = 
avgphi/float(nevts*ntrks); avgtheta = avgtheta/float(nevts*ntrks); avgx = avgx/float(nevts*ntrks); avgy = avgy/float(nevts*ntrks); avgz = avgz/float(nevts*ntrks); avgdx = avgdx/float(nevts*ntrks); avgdy = avgdy/float(nevts*ntrks); avgdz = avgdz/float(nevts*ntrks); float stdx = 0, stdy = 0, stdz = 0; float stddx = 0, stddy = 0, stddz = 0; for (size_t ie=0;ie<nevts;++ie) { for (size_t it=0;it<ntrks;++it) { float x_ = x(outtrk,ie,it); float y_ = y(outtrk,ie,it); float z_ = z(outtrk,ie,it); stdx += (x_-avgx)*(x_-avgx); stdy += (y_-avgy)*(y_-avgy); stdz += (z_-avgz)*(z_-avgz); float hx_ = x(hit,ie,it); float hy_ = y(hit,ie,it); float hz_ = z(hit,ie,it); stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx); stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy); stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz); } } stdx = sqrtf(stdx/float(nevts*ntrks)); stdy = sqrtf(stdy/float(nevts*ntrks)); stdz = sqrtf(stdz/float(nevts*ntrks)); stddx = sqrtf(stddx/float(nevts*ntrks)); stddy = sqrtf(stddy/float(nevts*ntrks)); stddz = sqrtf(stddz/float(nevts*ntrks)); printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx)); printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy)); printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz)); printf("track dx/x avg=%f std=%f\n", avgdx, stddx); printf("track dy/y avg=%f std=%f\n", avgdy, stddy); printf("track dz/z avg=%f std=%f\n", avgdz, stddz); printf("track pt avg=%f\n", avgpt); printf("track phi avg=%f\n", avgphi); printf("track theta avg=%f\n", avgtheta); hipFree(trk); hipFree(hit); hipFree(outtrk); return 0; }
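/*
 * The CUDA listing that follows (2625ec2986a581788abee531bc36a02a63bb4865.cu) is the original
 * source of the hipified listing above.  The conversion is a one-to-one rename of the runtime
 * API; the pairs visible in these two listings are
 *   cuda_runtime.h / cuda_profiler_api.h -> hip/hip_runtime.h / hip/hip_runtime_api.h
 *   cudaMallocManaged / cudaMemAdvise    -> hipMallocManaged / hipMemAdvise
 *   cudaCpuDeviceId                      -> hipCpuDeviceId
 * and the hipLaunchKernelGGL(kernel, grid, block, shmem, stream, ...) launches above correspond
 * to the CUDA kernel<<<grid, block, shmem, stream>>>(...) launch syntax; the remaining cuda*
 * stream, prefetch and free calls map the same way.
 */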
2625ec2986a581788abee531bc36a02a63bb4865.cu
/* icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3 */ #include <cuda_profiler_api.h> #include "cuda_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <unistd.h> #include <sys/time.h> #include <iostream> #include <chrono> #include <iomanip> #ifndef nevts #define nevts 100 #endif #ifndef bsize #define bsize 128 #endif #ifndef ntrks #define ntrks 9600 //122880 #endif #define nb ntrks/bsize #define smear 0.1 #ifndef NITER #define NITER 5 #endif #ifndef nlayer #define nlayer 20 #endif #ifndef num_streams #define num_streams 7 //streams changes answers #endif #ifndef threadsperblockx #define threadsperblockx 32 #endif #define threadsperblocky 512/threadsperblockx //#define threadsperblocky 1024/threadsperblockx //unclear why bit 1024 total threads per block gives resource error when running with more than one layer #ifndef blockspergrid #define blockspergrid 15 #endif #define HOSTDEV __host__ __device__ HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) { return i*D+j; } HOSTDEV size_t SymOffsets33(size_t i) { const size_t offs[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5}; return offs[i]; } HOSTDEV size_t SymOffsets66(size_t i) { const size_t offs[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20}; return offs[i]; } struct ATRK { float par[6]; float cov[21]; int q; // int hitidx[22]; }; struct AHIT { float pos[3]; float cov[6]; }; struct MP1I { int data[1*bsize]; }; struct MP22I { int data[22*bsize]; }; struct MP3F { float data[3*bsize]; }; struct MP6F { float data[6*bsize]; }; struct MP3x3 { float data[9*bsize]; }; struct MP3x6 { float data[18*bsize]; }; struct MP3x3SF { float data[6*bsize]; }; struct MP6x6SF { float data[21*bsize]; }; struct MP6x6F { float data[36*bsize]; }; struct MPTRK { MP6F par; MP6x6SF cov; MP1I q; // MP22I hitidx; }; struct MPHIT { MP3F pos; MP3x3SF cov; }; float randn(float mu, float sigma) { float U1, U2, W, mult; static float X1, X2; static int call = 0; if (call == 1) { call = !call; return (mu + sigma * (float) X2); } do { U1 = -1 + ((float) rand () / RAND_MAX) * 2; U2 = -1 + ((float) rand () / RAND_MAX) * 2; W = pow (U1, 2) + pow (U2, 2); } while (W >= 1 || W == 0); mult = sqrt ((-2 * log (W)) / W); X1 = U1 * mult; X2 = U2 * mult; call = !call; return (mu + sigma * (float) X1); } MPTRK* prepareTracks(ATRK inputtrk) { MPTRK* result; cudaMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK)); //fixme, align? cudaMemAdvise(result,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,cudaCpuDeviceId); for (size_t ie=0;ie<nevts;++ie) { for (size_t ib=0;ib<nb;++ib) { for (size_t it=0;it<bsize;++it) { //par for (size_t ip=0;ip<6;++ip) { result[ib + nb*ie].par.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip]; } //cov for (size_t ip=0;ip<21;++ip) { result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip]; } //q result[ib + nb*ie].q.data[it] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check } } } return result; } MPHIT* prepareHits(AHIT inputhit) { //MPHIT* result = (MPHIT*) malloc(nevts*nb*sizeof(MPHIT)); MPHIT* result; cudaMallocManaged((void**)&result,nlayer*nevts*nb*sizeof(MPHIT)); //fixme, align? 
cudaMemAdvise(result,nlayer*nevts*nb*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,cudaCpuDeviceId); for (int lay=0;lay<nlayer;++lay) { for (size_t ie=0;ie<nevts;++ie) { for (size_t ib=0;ib<nb;++ib) { for (size_t it=0;it<bsize;++it) { //pos for (size_t ip=0;ip<3;++ip) { result[lay+nlayer*(ib + nb*ie)].pos.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip]; } //cov for (size_t ip=0;ip<6;++ip) { result[lay+nlayer*(ib + nb*ie)].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip]; } } } } } return result; } HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib,int layer) { return &(tracks[ib + nb*ev+layer*nevts]); } HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) { return &(tracks[ib + nb*ev]); } HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib,int layer) { return &(tracks[ib + nb*ev+layer*nevts]); } HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) { return &(tracks[ib + nb*ev]); } HOSTDEV float q(const MP1I* bq, size_t it){ return (*bq).data[it]; } HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){ return (*bpars).data[it + ipar*bsize]; } HOSTDEV float x (const MP6F* bpars, size_t it){ return par(bpars, it, 0); } HOSTDEV float y (const MP6F* bpars, size_t it){ return par(bpars, it, 1); } HOSTDEV float z (const MP6F* bpars, size_t it){ return par(bpars, it, 2); } HOSTDEV float ipt (const MP6F* bpars, size_t it){ return par(bpars, it, 3); } HOSTDEV float phi (const MP6F* bpars, size_t it){ return par(bpars, it, 4); } HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); } HOSTDEV float x (MP6F* bpars, size_t it){ return par(bpars, it, 0); } HOSTDEV float y (MP6F* bpars, size_t it){ return par(bpars, it, 1); } HOSTDEV float z (MP6F* bpars, size_t it){ return par(bpars, it, 2); } HOSTDEV float ipt (MP6F* bpars, size_t it){ return par(bpars, it, 3); } HOSTDEV float phi (MP6F* bpars, size_t it){ return par(bpars, it, 4); } HOSTDEV float theta(MP6F* bpars, size_t it){ return par(bpars, it, 5); } HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){ return par(&(*btracks).par,it,ipar); } HOSTDEV float x (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); } HOSTDEV float y (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); } HOSTDEV float z (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); } HOSTDEV float ipt (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); } HOSTDEV float phi (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); } HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); } HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){ size_t ib = tk/bsize; const MPTRK* btracks = bTk(tracks, ev, ib); size_t it = tk % bsize; return par(btracks, it, ipar); } HOSTDEV float x (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); } HOSTDEV float y (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); } HOSTDEV float z (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); } HOSTDEV float ipt (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); } HOSTDEV float phi (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); } HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); } HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){ (*bpars).data[it + ipar*bsize] = val; } HOSTDEV void setx (MP6F* bpars, size_t it, float val){ 
return setpar(bpars, it, 0, val); } HOSTDEV void sety (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 1, val); } HOSTDEV void setz (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 2, val); } HOSTDEV void setipt (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 3, val); } HOSTDEV void setphi (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 4, val); } HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 5, val); } HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){ return setpar(&(*btracks).par,it,ipar,val); } HOSTDEV void setx (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 0, val); } HOSTDEV void sety (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 1, val); } HOSTDEV void setz (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 2, val); } HOSTDEV void setipt (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 3, val); } HOSTDEV void setphi (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 4, val); } HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 5, val); } HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) { return &(hits[ib + nb*ev]); } HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) { return &(hits[ib + nb*ev]); } HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib,int lay) { return &(hits[lay + (ib*nlayer) +(ev*nlayer*nb)]); } HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){ return (*hpos).data[it + ipar*bsize]; } HOSTDEV float x(const MP3F* hpos, size_t it) { return pos(hpos, it, 0); } HOSTDEV float y(const MP3F* hpos, size_t it) { return pos(hpos, it, 1); } HOSTDEV float z(const MP3F* hpos, size_t it) { return pos(hpos, it, 2); } HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){ return pos(&(*hits).pos,it,ipar); } HOSTDEV float x(const MPHIT* hits, size_t it) { return pos(hits, it, 0); } HOSTDEV float y(const MPHIT* hits, size_t it) { return pos(hits, it, 1); } HOSTDEV float z(const MPHIT* hits, size_t it) { return pos(hits, it, 2); } HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){ size_t ib = tk/bsize; const MPHIT* bhits = bHit(hits, ev, ib); size_t it = tk % bsize; return pos(bhits,it,ipar); } HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 0); } HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 1); } HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 2); } #define N bsize __forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) { const float* a = A->data; //ASSUME_ALIGNED(a, 64); const float* b = B->data; //ASSUME_ALIGNED(b, 64); float* c = C->data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = b[ 0*N+n] + a[ 2*N+n]*b[ 3*N+n] + a[ 3*N+n]*b[ 6*N+n] + a[ 4*N+n]*b[10*N+n] + a[ 5*N+n]*b[15*N+n]; c[ 1*N+n] = b[ 1*N+n] + a[ 2*N+n]*b[ 4*N+n] + a[ 3*N+n]*b[ 7*N+n] + a[ 4*N+n]*b[11*N+n] + a[ 5*N+n]*b[16*N+n]; c[ 2*N+n] = b[ 3*N+n] + a[ 2*N+n]*b[ 5*N+n] + a[ 3*N+n]*b[ 8*N+n] + a[ 4*N+n]*b[12*N+n] + a[ 5*N+n]*b[17*N+n]; c[ 3*N+n] = b[ 6*N+n] + a[ 2*N+n]*b[ 8*N+n] + a[ 3*N+n]*b[ 9*N+n] + a[ 4*N+n]*b[13*N+n] + a[ 5*N+n]*b[18*N+n]; c[ 4*N+n] = b[10*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[13*N+n] + a[ 4*N+n]*b[14*N+n] + a[ 5*N+n]*b[19*N+n]; c[ 5*N+n] = b[15*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 
4*N+n]*b[19*N+n] + a[ 5*N+n]*b[20*N+n]; c[ 6*N+n] = b[ 1*N+n] + a[ 8*N+n]*b[ 3*N+n] + a[ 9*N+n]*b[ 6*N+n] + a[10*N+n]*b[10*N+n] + a[11*N+n]*b[15*N+n]; c[ 7*N+n] = b[ 2*N+n] + a[ 8*N+n]*b[ 4*N+n] + a[ 9*N+n]*b[ 7*N+n] + a[10*N+n]*b[11*N+n] + a[11*N+n]*b[16*N+n]; c[ 8*N+n] = b[ 4*N+n] + a[ 8*N+n]*b[ 5*N+n] + a[ 9*N+n]*b[ 8*N+n] + a[10*N+n]*b[12*N+n] + a[11*N+n]*b[17*N+n]; c[ 9*N+n] = b[ 7*N+n] + a[ 8*N+n]*b[ 8*N+n] + a[ 9*N+n]*b[ 9*N+n] + a[10*N+n]*b[13*N+n] + a[11*N+n]*b[18*N+n]; c[10*N+n] = b[11*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[13*N+n] + a[10*N+n]*b[14*N+n] + a[11*N+n]*b[19*N+n]; c[11*N+n] = b[16*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[19*N+n] + a[11*N+n]*b[20*N+n]; c[12*N+n] = 0; c[13*N+n] = 0; c[14*N+n] = 0; c[15*N+n] = 0; c[16*N+n] = 0; c[17*N+n] = 0; c[18*N+n] = b[ 6*N+n]; c[19*N+n] = b[ 7*N+n]; c[20*N+n] = b[ 8*N+n]; c[21*N+n] = b[ 9*N+n]; c[22*N+n] = b[13*N+n]; c[23*N+n] = b[18*N+n]; c[24*N+n] = a[26*N+n]*b[ 3*N+n] + a[27*N+n]*b[ 6*N+n] + b[10*N+n] + a[29*N+n]*b[15*N+n]; c[25*N+n] = a[26*N+n]*b[ 4*N+n] + a[27*N+n]*b[ 7*N+n] + b[11*N+n] + a[29*N+n]*b[16*N+n]; c[26*N+n] = a[26*N+n]*b[ 5*N+n] + a[27*N+n]*b[ 8*N+n] + b[12*N+n] + a[29*N+n]*b[17*N+n]; c[27*N+n] = a[26*N+n]*b[ 8*N+n] + a[27*N+n]*b[ 9*N+n] + b[13*N+n] + a[29*N+n]*b[18*N+n]; c[28*N+n] = a[26*N+n]*b[12*N+n] + a[27*N+n]*b[13*N+n] + b[14*N+n] + a[29*N+n]*b[19*N+n]; c[29*N+n] = a[26*N+n]*b[17*N+n] + a[27*N+n]*b[18*N+n] + b[19*N+n] + a[29*N+n]*b[20*N+n]; c[30*N+n] = b[15*N+n]; c[31*N+n] = b[16*N+n]; c[32*N+n] = b[17*N+n]; c[33*N+n] = b[18*N+n]; c[34*N+n] = b[19*N+n]; c[35*N+n] = b[20*N+n]; } } __forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) { const float* a = A->data; //ASSUME_ALIGNED(a, 64); const float* b = B->data; //ASSUME_ALIGNED(b, 64); float* c = C->data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = b[ 0*N+n] + b[ 2*N+n]*a[ 2*N+n] + b[ 3*N+n]*a[ 3*N+n] + b[ 4*N+n]*a[ 4*N+n] + b[ 5*N+n]*a[ 5*N+n]; c[ 1*N+n] = b[ 6*N+n] + b[ 8*N+n]*a[ 2*N+n] + b[ 9*N+n]*a[ 3*N+n] + b[10*N+n]*a[ 4*N+n] + b[11*N+n]*a[ 5*N+n]; c[ 2*N+n] = b[ 7*N+n] + b[ 8*N+n]*a[ 8*N+n] + b[ 9*N+n]*a[ 9*N+n] + b[10*N+n]*a[10*N+n] + b[11*N+n]*a[11*N+n]; c[ 3*N+n] = b[12*N+n] + b[14*N+n]*a[ 2*N+n] + b[15*N+n]*a[ 3*N+n] + b[16*N+n]*a[ 4*N+n] + b[17*N+n]*a[ 5*N+n]; c[ 4*N+n] = b[13*N+n] + b[14*N+n]*a[ 8*N+n] + b[15*N+n]*a[ 9*N+n] + b[16*N+n]*a[10*N+n] + b[17*N+n]*a[11*N+n]; c[ 5*N+n] = 0; c[ 6*N+n] = b[18*N+n] + b[20*N+n]*a[ 2*N+n] + b[21*N+n]*a[ 3*N+n] + b[22*N+n]*a[ 4*N+n] + b[23*N+n]*a[ 5*N+n]; c[ 7*N+n] = b[19*N+n] + b[20*N+n]*a[ 8*N+n] + b[21*N+n]*a[ 9*N+n] + b[22*N+n]*a[10*N+n] + b[23*N+n]*a[11*N+n]; c[ 8*N+n] = 0; c[ 9*N+n] = b[21*N+n]; c[10*N+n] = b[24*N+n] + b[26*N+n]*a[ 2*N+n] + b[27*N+n]*a[ 3*N+n] + b[28*N+n]*a[ 4*N+n] + b[29*N+n]*a[ 5*N+n]; c[11*N+n] = b[25*N+n] + b[26*N+n]*a[ 8*N+n] + b[27*N+n]*a[ 9*N+n] + b[28*N+n]*a[10*N+n] + b[29*N+n]*a[11*N+n]; c[12*N+n] = 0; c[13*N+n] = b[27*N+n]; c[14*N+n] = b[26*N+n]*a[26*N+n] + b[27*N+n]*a[27*N+n] + b[28*N+n] + b[29*N+n]*a[29*N+n]; c[15*N+n] = b[30*N+n] + b[32*N+n]*a[ 2*N+n] + b[33*N+n]*a[ 3*N+n] + b[34*N+n]*a[ 4*N+n] + b[35*N+n]*a[ 5*N+n]; c[16*N+n] = b[31*N+n] + b[32*N+n]*a[ 8*N+n] + b[33*N+n]*a[ 9*N+n] + b[34*N+n]*a[10*N+n] + b[35*N+n]*a[11*N+n]; c[17*N+n] = 0; c[18*N+n] = b[33*N+n]; c[19*N+n] = b[32*N+n]*a[26*N+n] + b[33*N+n]*a[27*N+n] + b[34*N+n] + b[35*N+n]*a[29*N+n]; c[20*N+n] = b[35*N+n]; } } __forceinline__ __device__ void KalmanGainInv(const MP6x6SF* A, const MP3x3SF* B, MP3x3* C) { // k = P 
Ht(HPHt + R)^-1 // HpHt -> cov of x,y,z. take upper 3x3 matrix of P // This calculates the inverse of HpHt +R const float* a = (*A).data; //ASSUME_ALIGNED(a, 64); const float* b = (*B).data; //ASSUME_ALIGNED(b, 64); float* c = (*C).data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { double det = ((a[0*N+n]+b[0*N+n])*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n])))) - ((a[1*N+n]+b[1*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n])))) + ((a[2*N+n]+b[2*N+n])*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n])))); double invdet = 1.0/det; c[ 0*N+n] = invdet*(((a[ 6*N+n]+b[ 3*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 1*N+n] = -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 2*N+n] = invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[7*N+n]+b[4*N+n]))); c[ 3*N+n] = -1*invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[7*N+n]+b[4*N+n]) *(a[2*N+n]+b[2*N+n]))); c[ 4*N+n] = invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[11*N+n]+b[5*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[2*N+n]+b[2*N+n]))); c[ 5*N+n] = -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n]))); c[ 6*N+n] = invdet*(((a[ 1*N+n]+b[ 1*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[6*N+n]+b[3*N+n]))); c[ 7*N+n] = -1*invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[7*N+n]+b[4*N+n])) - ((a[2*N+n]+b[2*N+n]) *(a[1*N+n]+b[1*N+n]))); c[ 8*N+n] = invdet*(((a[ 0*N+n]+b[ 0*N+n]) *(a[6*N+n]+b[3*N+n])) - ((a[1*N+n]+b[1*N+n]) *(a[1*N+n]+b[1*N+n]))); } // __syncthreads(); } __forceinline__ __device__ void KalmanGain(const MP6x6SF* A, const MP3x3* B, MP3x6* C) { // k = P Ht(HPHt + R)^-1 // HpHt -> cov of x,y,z. 
take upper 3x3 matrix of P // This calculates the kalman gain const float* a = (*A).data; //ASSUME_ALIGNED(a, 64); const float* b = (*B).data; //ASSUME_ALIGNED(b, 64); float* c = (*C).data; //ASSUME_ALIGNED(c, 64); for(int n=threadIdx.x;n<N;n+=blockDim.x) { c[ 0*N+n] = a[0*N+n]*b[0*N+n] + a[1*N+n]*b[3*N+n] + a[2*N+n]*b[6*N+n]; c[ 1*N+n] = a[0*N+n]*b[1*N+n] + a[1*N+n]*b[4*N+n] + a[2*N+n]*b[7*N+n]; c[ 2*N+n] = a[0*N+n]*b[2*N+n] + a[1*N+n]*b[5*N+n] + a[2*N+n]*b[8*N+n]; c[ 3*N+n] = a[1*N+n]*b[0*N+n] + a[6*N+n]*b[3*N+n] + a[7*N+n]*b[6*N+n]; c[ 4*N+n] = a[1*N+n]*b[1*N+n] + a[6*N+n]*b[4*N+n] + a[7*N+n]*b[7*N+n]; c[ 5*N+n] = a[1*N+n]*b[2*N+n] + a[6*N+n]*b[5*N+n] + a[7*N+n]*b[8*N+n]; c[ 6*N+n] = a[2*N+n]*b[0*N+n] + a[7*N+n]*b[3*N+n] + a[11*N+n]*b[6*N+n]; c[ 7*N+n] = a[2*N+n]*b[1*N+n] + a[7*N+n]*b[4*N+n] + a[11*N+n]*b[7*N+n]; c[ 8*N+n] = a[2*N+n]*b[2*N+n] + a[7*N+n]*b[5*N+n] + a[11*N+n]*b[8*N+n]; c[ 9*N+n] = a[3*N+n]*b[0*N+n] + a[8*N+n]*b[3*N+n] + a[12*N+n]*b[6*N+n]; c[ 10*N+n] = a[3*N+n]*b[1*N+n] + a[8*N+n]*b[4*N+n] + a[12*N+n]*b[7*N+n]; c[ 11*N+n] = a[3*N+n]*b[2*N+n] + a[8*N+n]*b[5*N+n] + a[12*N+n]*b[8*N+n]; c[ 12*N+n] = a[4*N+n]*b[0*N+n] + a[9*N+n]*b[3*N+n] + a[13*N+n]*b[6*N+n]; c[ 13*N+n] = a[4*N+n]*b[1*N+n] + a[9*N+n]*b[4*N+n] + a[13*N+n]*b[7*N+n]; c[ 14*N+n] = a[4*N+n]*b[2*N+n] + a[9*N+n]*b[5*N+n] + a[13*N+n]*b[8*N+n]; c[ 15*N+n] = a[5*N+n]*b[0*N+n] + a[10*N+n]*b[3*N+n] + a[14*N+n]*b[6*N+n]; c[ 16*N+n] = a[5*N+n]*b[1*N+n] + a[10*N+n]*b[4*N+n] + a[14*N+n]*b[7*N+n]; c[ 17*N+n] = a[5*N+n]*b[2*N+n] + a[10*N+n]*b[5*N+n] + a[14*N+n]*b[8*N+n]; } //__syncthreads(); } __forceinline__ __device__ void KalmanUpdate(MP6x6SF* trkErr, MP6F* inPar, const MP3x3SF* hitErr, const MP3F* msP){//, MP3x3* inverse_temp, MP3x6* kGain, MP6x6SF* newErr){ MP3x3 inverse_temp; MP3x6 kGain; MP6x6SF newErr; //MP6F newPar; KalmanGainInv(trkErr,hitErr,&inverse_temp); //__syncthreads(); KalmanGain(trkErr,&inverse_temp,&kGain); //__syncthreads(); for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){ float xin = x(inPar,it); float yin = y(inPar,it); float zin = z(inPar,it); float ptin = 1./ipt(inPar,it); float phiin = phi(inPar,it); float thetain = theta(inPar,it); float xout = x(msP,it); float yout = y(msP,it); float zout = z(msP,it); float ydiff = y(msP,it) - y(inPar,it); float xnew = xin + (kGain.data[0*bsize+it]*(xout-xin)) +(kGain.data[1*bsize+it]*(yout-yin)); // removed "zout-zin" term since zin is set to zout thus the term is 0 anyway. 
float ynew = yin + (kGain.data[3*bsize+it]*(xout-xin)) +(kGain.data[4*bsize+it]*(yout-yin)); float znew = zin + (kGain.data[6*bsize+it]*(xout-xin)) +(kGain.data[7*bsize+it]*(yout-yin)); float ptnew = ptin + (kGain.data[9*bsize+it]*(xout-xin)) +(kGain.data[10*bsize+it]*(yout-yin)); float phinew = phiin + (kGain.data[12*bsize+it]*(xout-xin)) +(kGain.data[13*bsize+it]*(yout-yin)); float thetanew = thetain + (kGain.data[15*bsize+it]*(xout-xin)) +(kGain.data[16*bsize+it]*(yout-yin)); newErr.data[0*bsize+it] = trkErr->data[0*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[0*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[2*bsize+it]); newErr.data[1*bsize+it] = trkErr->data[1*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[7*bsize+it]); newErr.data[2*bsize+it] = trkErr->data[2*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[3*bsize+it] = trkErr->data[3*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[4*bsize+it] = trkErr->data[4*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[5*bsize+it] = trkErr->data[5*bsize+it] - (kGain.data[0*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[1*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[2*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[6*bsize+it] = trkErr->data[6*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[1*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[6*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[7*bsize+it]); newErr.data[7*bsize+it] = trkErr->data[7*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[8*bsize+it] = trkErr->data[8*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[9*bsize+it] = trkErr->data[9*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[10*bsize+it] = trkErr->data[10*bsize+it] - (kGain.data[3*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[4*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[5*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[11*bsize+it] = trkErr->data[11*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[2*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[7*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[11*bsize+it]); newErr.data[12*bsize+it] = trkErr->data[12*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[13*bsize+it] = trkErr->data[13*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[14*bsize+it] = trkErr->data[14*bsize+it] - (kGain.data[6*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[7*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[8*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[15*bsize+it] = 
trkErr->data[15*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[3*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[8*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[12*bsize+it]); newErr.data[16*bsize+it] = trkErr->data[16*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[17*bsize+it] = trkErr->data[17*bsize+it] - (kGain.data[9*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[10*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[11*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[18*bsize+it] = trkErr->data[18*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[4*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[9*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[13*bsize+it]); newErr.data[19*bsize+it] = trkErr->data[19*bsize+it] - (kGain.data[12*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[13*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[14*bsize+it]*trkErr->data[14*bsize+it]); newErr.data[20*bsize+it] = trkErr->data[20*bsize+it] - (kGain.data[15*bsize+it]*trkErr->data[5*bsize+it]+kGain.data[16*bsize+it]*trkErr->data[10*bsize+it]+kGain.data[17*bsize+it]*trkErr->data[14*bsize+it]); setx(inPar,it,xnew ); sety(inPar,it,ynew ); setz(inPar,it,znew); setipt(inPar,it, ptnew); setphi(inPar,it, phinew); settheta(inPar,it, thetanew); } //__syncthreads(); trkErr = &newErr; } __device__ __constant__ float kfact = 100/3.8; __device__ __forceinline__ void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar, const MP1I* inChg,const MP3F* msP, MP6x6SF* outErr, MP6F* outPar, struct MP6x6F* errorProp, struct MP6x6F* temp,const MP3x3SF* hitErr) { for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){ const float zout = z(msP,it); const float k = q(inChg,it)*kfact;//100/3.8; const float deltaZ = zout - z(inPar,it); const float pt = 1./ipt(inPar,it); const float cosP = cosf(phi(inPar,it)); const float sinP = sinf(phi(inPar,it)); const float cosT = cosf(theta(inPar,it)); const float sinT = sinf(theta(inPar,it)); const float pxin = cosP*pt; const float pyin = sinP*pt; const float icosT = 1.0/cosT; const float icosTk = icosT/k; const float alpha = deltaZ*sinT*ipt(inPar,it)*icosTk;///(cosT*k); const float sina = sinf(alpha); // this can be approximated; const float cosa = cosf(alpha); // this can be approximated; setx(outPar,it, x(inPar,it) + k*(pxin*sina - pyin*(1.-cosa)) ); sety(outPar,it, y(inPar,it) + k*(pyin*sina + pxin*(1.-cosa)) ); setz(outPar,it,zout); setipt(outPar,it, ipt(inPar,it)); setphi(outPar,it, phi(inPar,it)+alpha ); settheta(outPar,it, theta(inPar,it) ); const float sCosPsina = sinf(cosP*sina); const float cCosPsina = cosf(cosP*sina); for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.; errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)*icosT; errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)*(icosT*pt)-k*(cosP*sina-sinP*(1.-cCosPsina))*(pt*pt); errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k*pt)*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina)); errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)*(icosT*icosT); errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)*icosT; errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*pt)-k*(sinP*sina+cosP*(1.-cCosPsina))*(pt*pt); errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k*pt)*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina); 
errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*icosT); errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT*(icosTk); errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ*(icosTk); errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ*(icosT*icosTk); // for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.; // errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)/cosT; // errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*ipt(inPar,it))-k*(cosP*sina-sinP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it)); // errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k/ipt(inPar,it))*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina)); // errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*cosT); // errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)/cosT; // errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*ipt(inPar,it))-k*(sinP*sina+cosP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it)); // errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k/ipt(inPar,it))*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina); // errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*cosT); // errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT/(cosT*k); // errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ/(cosT*k); // errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ/(cosT*cosT*k); } //__syncthreads(); MultHelixPropEndcap(errorProp, inErr, temp); //__syncthreads(); MultHelixPropTranspEndcap(errorProp, temp, outErr); } __global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){ int ie_range; if(stream == num_streams){ ie_range = (int)(nevts%num_streams);} else{ie_range = (int)(nevts/num_streams);} for (size_t ie = blockIdx.x; ie<ie_range; ie+=gridDim.x){ for(size_t ib = threadIdx.y; ib <nb; ib+=blockDim.y){ const MPTRK* btracks = bTk(trk,ie,ib); MPTRK* obtracks = bTk(outtrk,ie,ib); for(int layer=0;layer<nlayer;++layer){ const MPHIT* bhits = bHit(hit,ie,ib,layer); /*__shared__*/ struct MP6x6F errorProp, temp; // using shared here causes a race hazard. idk why i did it this way, might be to include shared. 
maybe move to inside p2z function propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos, &(*obtracks).cov, &(*obtracks).par, &errorProp, &temp,&(*bhits).cov); KalmanUpdate(&(*obtracks).cov,&(*obtracks).par,&(*bhits).cov,&(*bhits).pos); } } } } int main (int argc, char* argv[]) { printf("RUNNING CUDA!!\n"); printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky); ATRK inputtrk = { {-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975}, {6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06, 6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627, 0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348}, 1 }; AHIT inputhit = { {-20.7824649810791, -12.24150276184082, 57.8067626953125}, {2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945} }; printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]); printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))], inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))], inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]); printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]); printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear); printf("NITER=%d\n", NITER); long setup_start, setup_stop; struct timeval timecheck; gettimeofday(&timecheck, NULL); setup_start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; MPTRK* trk = prepareTracks(inputtrk); MPHIT* hit = prepareHits(inputhit); MPTRK* outtrk; cudaMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK)); dim3 grid(blockspergrid,1,1); dim3 block(threadsperblockx,threadsperblocky,1); int device = -1; cudaGetDevice(&device); int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK); int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK); int stream_range; if (stream_remainder == 0){ stream_range =num_streams;} else{stream_range = num_streams+1;} cudaStream_t streams[stream_range]; for (int s = 0; s<stream_range;s++){ cudaStreamCreate(&streams[s]); //cudaStreamCreateWithFlags(&streams[s],cudaStreamNonBlocking); } gettimeofday(&timecheck, NULL); setup_stop = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; printf("done preparing!\n"); printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK)); printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK)); printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT)); auto wall_start = std::chrono::high_resolution_clock::now(); for(int itr=0; itr<NITER; itr++){ for (int s = 0; s<num_streams;s++){ cudaMemPrefetchAsync(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK), device,streams[s]); cudaMemPrefetchAsync(hit+(s*stream_chunk*nlayer),nlayer*stream_chunk*sizeof(MPHIT), device,streams[s]); cudaMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,device); cudaMemAdvise(hit+(s*stream_chunk*nlayer),nlayer*stream_chunk*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,device); //} 
//cudaStreamAttachMemAsync(streams[s],trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),cudaMemAttachHost); //cudaStreamAttachMemAsync(streams[s],hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),cudaMemAttachHost); //cudaMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),cudaMemAdviseSetReadMostly,device); //cudaMemAdvise(hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),cudaMemAdviseSetReadMostly,device); //cudaMemAdvise(trk+(s*stream_chunk),stream_chunk*sizeof(MPTRK),cudaMemAdviseSetAccessedBy,device); //cudaMemAdvise(hit+(s*stream_chunk),stream_chunk*sizeof(MPHIT),cudaMemAdviseSetAccessedBy,device); } if(stream_remainder != 0){ cudaMemPrefetchAsync(trk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK), device,streams[num_streams]); cudaMemAdvise(trk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,device); cudaMemPrefetchAsync(hit+(num_streams*stream_chunk*nlayer),nlayer*stream_remainder*sizeof(MPHIT), device,streams[num_streams]); cudaMemAdvise(hit+(num_streams*stream_chunk*nlayer),nlayer*stream_remainder*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,device); } // cudaMemAdvise(trk,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,device); // cudaMemAdvise(hit,nevts*nb*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,device); // cudaMemAdvise(trk,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetReadMostly,device); // cudaMemAdvise(hit,nevts*nb*sizeof(MPHIT),cudaMemAdviseSetReadMostly,device); for (int s = 0; s<num_streams;s++){ GPUsequence<<<grid,block,0,streams[s]>>>(trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s); } if(stream_remainder != 0){ GPUsequence<<<grid,block,0,streams[num_streams]>>>(trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams); } //cudaDeviceSynchronize(); // Normal sync for (int s = 0; s<num_streams;s++){ cudaMemPrefetchAsync(outtrk+(s*stream_chunk),stream_chunk*sizeof(MPTRK), cudaCpuDeviceId,streams[s]); } if(stream_remainder != 0){ cudaMemPrefetchAsync(outtrk+(num_streams*stream_chunk),stream_remainder*sizeof(MPTRK), cudaCpuDeviceId,streams[num_streams]); } } //end itr loop cudaDeviceSynchronize(); // shaves a few seconds auto wall_stop = std::chrono::high_resolution_clock::now(); for (int s = 0; s<stream_range;s++){ cudaStreamDestroy(streams[s]); } auto wall_diff = wall_stop - wall_start; auto wall_time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(wall_diff).count()) / 1e6; printf("setup time time=%f (s)\n", (setup_stop-setup_start)*0.001); printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), wall_time, wall_time/(nevts*ntrks*int(NITER))); printf("formatted %i %i %i %i %i %f 0 %f %i\n",int(NITER),nevts, ntrks, bsize, nb, wall_time, (setup_stop-setup_start)*0.001, num_streams); float avgx = 0, avgy = 0, avgz = 0; float avgpt = 0, avgphi = 0, avgtheta = 0; float avgdx = 0, avgdy = 0, avgdz = 0; for (size_t ie=0;ie<nevts;++ie) { for (size_t it=0;it<ntrks;++it) { float x_ = x(outtrk,ie,it); float y_ = y(outtrk,ie,it); float z_ = z(outtrk,ie,it); float pt_ = 1./ipt(outtrk,ie,it); float phi_ = phi(outtrk,ie,it); float theta_ = theta(outtrk,ie,it); avgpt += pt_; avgphi += phi_; avgtheta += theta_; avgx += x_; avgy += y_; avgz += z_; float hx_ = x(hit,ie,it); float hy_ = y(hit,ie,it); float hz_ = z(hit,ie,it); avgdx += (x_-hx_)/x_; avgdy += (y_-hy_)/y_; avgdz += (z_-hz_)/z_; } } avgpt = avgpt/float(nevts*ntrks); avgphi = avgphi/float(nevts*ntrks); avgtheta = 
avgtheta/float(nevts*ntrks); avgx = avgx/float(nevts*ntrks); avgy = avgy/float(nevts*ntrks); avgz = avgz/float(nevts*ntrks); avgdx = avgdx/float(nevts*ntrks); avgdy = avgdy/float(nevts*ntrks); avgdz = avgdz/float(nevts*ntrks); float stdx = 0, stdy = 0, stdz = 0; float stddx = 0, stddy = 0, stddz = 0; for (size_t ie=0;ie<nevts;++ie) { for (size_t it=0;it<ntrks;++it) { float x_ = x(outtrk,ie,it); float y_ = y(outtrk,ie,it); float z_ = z(outtrk,ie,it); stdx += (x_-avgx)*(x_-avgx); stdy += (y_-avgy)*(y_-avgy); stdz += (z_-avgz)*(z_-avgz); float hx_ = x(hit,ie,it); float hy_ = y(hit,ie,it); float hz_ = z(hit,ie,it); stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx); stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy); stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz); } } stdx = sqrtf(stdx/float(nevts*ntrks)); stdy = sqrtf(stdy/float(nevts*ntrks)); stdz = sqrtf(stdz/float(nevts*ntrks)); stddx = sqrtf(stddx/float(nevts*ntrks)); stddy = sqrtf(stddy/float(nevts*ntrks)); stddz = sqrtf(stddz/float(nevts*ntrks)); printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx)); printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy)); printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz)); printf("track dx/x avg=%f std=%f\n", avgdx, stddx); printf("track dy/y avg=%f std=%f\n", avgdy, stddy); printf("track dz/z avg=%f std=%f\n", avgdz, stddz); printf("track pt avg=%f\n", avgpt); printf("track phi avg=%f\n", avgphi); printf("track theta avg=%f\n", avgtheta); cudaFree(trk); cudaFree(hit); cudaFree(outtrk); return 0; }
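// [Editor's sketch -- not part of the p2z benchmark above; invSym3x3 and its packing are illustrative.]
// KalmanGainInv computes K = P*H^T*(H*P*H^T + R)^-1 by inverting the 3x3 symmetric matrix
// S = H*P*H^T + R (the upper-left 3x3 block of the packed track covariance plus the packed hit
// covariance) with the adjugate/determinant formula. The packed 6x6 indexing makes that hard to
// read, so here is the same formula written for a single symmetric 3x3 matrix packed as
// [xx, xy, xz, yy, yz, zz].
__host__ __device__ inline void invSym3x3(const float s[6], float out[6])
{
  // S = | s0 s1 s2 |
  //     | s1 s3 s4 |
  //     | s2 s4 s5 |
  const float det = s[0]*(s[3]*s[5] - s[4]*s[4])
                  - s[1]*(s[1]*s[5] - s[4]*s[2])
                  + s[2]*(s[1]*s[4] - s[3]*s[2]);
  const float invdet = 1.0f/det;             // assumes S is well conditioned (det != 0)
  out[0] =  (s[3]*s[5] - s[4]*s[4])*invdet;  // (0,0) entry of S^-1
  out[1] = -(s[1]*s[5] - s[2]*s[4])*invdet;  // (0,1)
  out[2] =  (s[1]*s[4] - s[2]*s[3])*invdet;  // (0,2)
  out[3] =  (s[0]*s[5] - s[2]*s[2])*invdet;  // (1,1)
  out[4] = -(s[0]*s[4] - s[2]*s[1])*invdet;  // (1,2)
  out[5] =  (s[0]*s[3] - s[1]*s[1])*invdet;  // (2,2)
}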
13a6081b7c2ad31b08c87522394aa6c03652133b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" using namespace std; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } //// int row = blockIdx.x; //// int col = threadIdx.x; const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int row = thread_2D_pos.y; int col = thread_2D_pos.x; int radius = filterWidth/2; float res = 0.0; float pixel_value, filter_value; int conv_pixel_row, conv_pixel_col; for(int r=-radius; r<=radius; r++){ for (int c = -radius; c<= radius; c++){ conv_pixel_row = row+r; if(conv_pixel_row >= numRows){conv_pixel_row = numRows-1;} if(conv_pixel_row < 0){conv_pixel_row = 0;} conv_pixel_col = col+c; if(conv_pixel_col >= numCols){conv_pixel_col = numCols-1;} if(conv_pixel_col < 0){conv_pixel_col = 0;} pixel_value = inputChannel[numCols*conv_pixel_row + conv_pixel_col]; filter_value = filter[filterWidth*(r+radius) + (c+radius)]; res += filter_value*pixel_value; } } outputChannel[numCols*row + col] = (unsigned char) res; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
} //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; size_t row = thread_2D_pos.y; size_t col = thread_2D_pos.x; // int row, col; // for(row=0; row<numRows; row++){ // for(col=0; col<numCols; col++){ redChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].x; greenChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].y; blueChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].z; // } // } // free(row); free(col); } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); cout<<h_filter[1]<<"\n"; //TODO: //Copy the 
filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(25,40,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize(77,27,1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,d_greenBlurred,d_blueBlurred,d_outputImageRGBA,numRows,numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
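// [Editor's sketch -- not part of the homework code above; makeBlurGrid is an illustrative name.]
// The launch above hard-codes blockSize(25,40,1) and gridSize(77,27,1), which only covers one
// particular image resolution. The TODO asks for the grid to be derived from the image size; the
// conventional answer is ceiling division, so the last partial tile of pixels still gets a block
// and the kernels' existing bounds checks drop the excess threads. dim3 comes from the hip/cuda
// runtime header already included by the file above.
static inline dim3 makeBlurGrid(size_t numRows, size_t numCols, const dim3& block)
{
  return dim3((unsigned int)((numCols + block.x - 1) / block.x),   // x spans columns
              (unsigned int)((numRows + block.y - 1) / block.y),   // y spans rows
              1);
}
// Usage inside your_gaussian_blur:
//   const dim3 blockSize(16, 16, 1);
//   const dim3 gridSize = makeBlurGrid(numRows, numCols, blockSize);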
13a6081b7c2ad31b08c87522394aa6c03652133b.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.cpp" #include "utils.h" using namespace std; __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } //// int row = blockIdx.x; //// int col = threadIdx.x; const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int row = thread_2D_pos.y; int col = thread_2D_pos.x; int radius = filterWidth/2; float res = 0.0; float pixel_value, filter_value; int conv_pixel_row, conv_pixel_col; for(int r=-radius; r<=radius; r++){ for (int c = -radius; c<= radius; c++){ conv_pixel_row = row+r; if(conv_pixel_row >= numRows){conv_pixel_row = numRows-1;} if(conv_pixel_row < 0){conv_pixel_row = 0;} conv_pixel_col = col+c; if(conv_pixel_col >= numCols){conv_pixel_col = numCols-1;} if(conv_pixel_col < 0){conv_pixel_col = 0;} pixel_value = inputChannel[numCols*conv_pixel_row + conv_pixel_col]; filter_value = filter[filterWidth*(r+radius) + (c+radius)]; res += filter_value*pixel_value; } } outputChannel[numCols*row + col] = (unsigned char) res; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
} //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; size_t row = thread_2D_pos.y; size_t col = thread_2D_pos.x; // int row, col; // for(row=0; row<numRows; row++){ // for(col=0; col<numCols; col++){ redChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].x; greenChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].y; blueChannel[row*numCols+col] = inputImageRGBA[row*numCols+col].z; // } // } // free(row); free(col); } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //const int thread_1D_pos = numCols*blockIdx.x + threadIdx.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); cout<<h_filter[1]<<"\n"; //TODO: //Copy 
the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(25,40,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize(77,27,1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize,blockSize>>>(d_red,d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_green,d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_blue,d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,d_greenBlurred,d_blueBlurred,d_outputImageRGBA,numRows,numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
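// [Editor's sketch -- a hypothetical helper, not the course's reference_calc.cpp.]
// A plain CPU version of the clamped weighted average that gaussian_blur implements for one
// channel. It mirrors the worked 3x3 example in the header comment (weights of 0.2 over the
// centre cross of pixels 5,3,6,2,0 -> 3.2) and the clamp-to-edge rule the kernel applies to
// out-of-bounds neighbours.
#include <algorithm>   // std::min, std::max
static void blurChannelCPU(const unsigned char* in, unsigned char* out,
                           int numRows, int numCols,
                           const float* filter, int filterWidth)
{
  const int radius = filterWidth / 2;
  for (int row = 0; row < numRows; ++row) {
    for (int col = 0; col < numCols; ++col) {
      float acc = 0.0f;
      for (int r = -radius; r <= radius; ++r) {
        for (int c = -radius; c <= radius; ++c) {
          // clamp neighbour coordinates to the image border, as the kernel does
          const int rr = std::min(numRows - 1, std::max(0, row + r));
          const int cc = std::min(numCols - 1, std::max(0, col + c));
          acc += filter[(r + radius) * filterWidth + (c + radius)]
               * in[rr * numCols + cc];
        }
      }
      out[row * numCols + col] = (unsigned char)acc;
    }
  }
}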
9f7f48e6691f5eca1e8cb2dd140e7660fa92e8c1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <stdio.h> #include "vs2010_fix_cuda.h" #include "SeqBitmap.h" #include "Bitmaps_cuda.h" //#define __PRINT_DEBUG__ #define N 65536 #define BIG_ENDIAN 0xffff //#define __TRANSACTION_WISE__ //#define __CANDIDATE_WISE__ // Bitmap4 __device__ int _sBitmapLookupTable4_device[N]; __device__ int _countLookupTable4_device[N]; // Bitmap8 __device__ int _sBitmapLookupTable8_device[N]; __device__ int _countLookupTable8_device[N]; // Bitmap16 __device__ int _sBitmapLookupTable16_device[N]; // Bitmap32 __device__ int _sBitmapLookupTable32_device[N]; // Bitmap64 __device__ int _sBitmapLookupTable64_device[N]; // REDUCTION!! template <unsigned int blockSize> __device__ void reduce(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #ifdef __PRINT_DEBUG__ // Printing Bitmaps for Debugging __global__ void printBitmap(){ //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; if(threadId >= N) return; __syncthreads(); printf("_sBitmap[%d]: %d\n", threadId, /*_sBitmapLookupTable4_device[threadId]*/); } #endif // Kernel functions for AND and OR Bitwise operations - common for all Bitmaps // Transaction-wise (need to test with Candidate-wise) __global__ void AndBitwiseOperation(unsigned int* _memory_device, const int b1_size, unsigned int* b1_memory, unsigned int* b2_memory) { // Candidate wise technique for ANDing operation #ifdef __CANDIDATE_WISE__ int i = threadIdx.x; while (i < b1_size){ _memory_device[i] = b1_memory[i] & b2_memory[i]; i += blockDim.x; } // global reduce // works with small dataset - Registers issues for sure reduce<256>(_memory_device, _memory_device, n); // after the ANDing there is the candidate generation (counting op)! #endif // Transaction wise technique for ANDing operation #ifdef __TRANSACTION_WISE__ int i = blockIdx.x * blockDim.x + threadIdx.x; int n = b1_size; while (i < b1_size){ _memory_device[i] = b1_memory[i] & b2_memory[i]; i += blockDim.x * gridDim.x; } // global reduce // works with small dataset - Registers issues for sure reduce<256>(_memory_device, _memory_device, n); // after the ANDing there is the candidate generation (counting op)! 
#else // Simple ANDing opeartion using CUDA //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= b1_size) return; //output bitwise and _memory_device[threadId] = b1_memory[threadId] & b2_memory[threadId]; #endif } // BITMAP4 __global__ void CreateSBitmap4(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable4_device[msib_device[threadId]]; } __global__ void Count4_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count4_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count4_device(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // we count the support // go thru the whole bitmap in steps of SHORT (but note that each // customer uses 4 bits) int support = 0; int i = threadIdx.x; while(i < _memSizeShort_device[0]) support += _countLookupTable4_device[ms_device[i]]; support_device[0] = support; return *support_device; } // BITMAP8 __global__ void CreateSBitmap8(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable8_device[msib_device[threadId]]; } __global__ void Count8_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count8_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count8_device(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // we count the support // go thru the whole bitmap in steps of SHORT (but note that each // customer uses 4 bits) int support = 0; int i = threadIdx.x; while(i < _memSizeShort_device[0]) support += _countLookupTable8_device[ms_device[i]]; support_device[0] = support; return *support_device; } // BITMAP16 __global__ void CreateSBitmap16(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // NEED TO TEST WITH BIG DATASETS!! 
// we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable16_device[msib_device[threadId]]; } __global__ void Count16_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count16_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count16_device(unsigned short* _memory_device, int* support_device, int* _memSizeShort_device) { int support[N]; int n = _memSizeShort_device[0]; int i = threadIdx.x; // we count the support // go thru the whole bitmap in steps of SHORT while(i < _memSizeShort_device[0]) if (_memory_device[i] > 0) support[i] = 1; // reduction // works with small dataset - Registers issues for sure reduce<256>(support_device, support, n); return *support_device; } // BITMAP32 __global__ void CreateSBitmap32(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { int i = threadIdx.x; // go thru each group of 2 shorts (note that 2 shorts is one customer) for (i = 0; i < _memSizeShort_device[0]; i += blockDim.x*blockIdx.x) // i += 2 { // _sBitmapLookupTable32 should take in a short and return a short // with the first 1 changed to a // 0 and all remaining bits set to 1 if (msib_device[i + 1] > 0) { // Post-process the first short, set the other to all 1's // big endian ms_device[i + 1] = _sBitmapLookupTable32_device[msib_device[i + 1]]; ms_device[i] = BIG_ENDIAN; } else { // Set first short to 0, post-process the second ms_device[i + 1] = 0; ms_device[i] = _sBitmapLookupTable32_device[msib_device[i]]; } } } // we can call the same function as Bitmap16 __global__ void Count32_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // can call the same kernel function!! 
Count16_device(ms_device, support_device, _memSizeShort_device); } // BITMAP64 __global__ void CreateSBitmap64(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { int i = threadIdx.x; // go thru each group of 4 shorts (note that 4 shorts is one customer) for (i = 0; i < _memSizeShort_device[0]; i += blockDim.x*blockIdx.x) // i += 4 { // _sBitmapLookupTable should take in a short and return a short // with the first 1 changed to a // 0 and all remaining bits set to 1 if (msib_device[i + 1] > 0) { // Post-process the first short, set the others to all 1's ms_device[i] = USHRT_MAX; ms_device[i + 1] = _sBitmapLookupTable64_device[msib_device[i + 1]]; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = USHRT_MAX; } else if (msib_device[i] > 0) { ms_device[i] = _sBitmapLookupTable64_device[msib_device[i]]; ms_device[i + 1] = 0; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = USHRT_MAX; } else if (msib_device[i + 3] > 0) { ms_device[i] = 0; ms_device[i + 1] = 0; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = _sBitmapLookupTable64_device[msib_device[i + 3]]; } else { // Set first 3 shorts to 0, post-process the last short ms_device[i] = 0; ms_device[i + 1] = 0; ms_device[i + 2] = _sBitmapLookupTable64_device[msib_device[i + 2]]; ms_device[i + 3] = 0; } } } __global__ void Count64_global(unsigned int* ms_device, int* support_device, int* _memSizeShort_device) { Count64_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count64_device(unsigned int* _memory_device, int* support_device, int* _memSizeInt_device) { int support[N]; int n = _memSizeInt_device[0]; int i = threadIdx.x; // Go through each group of 2 ints for (i = 0; i < _memSizeInt_device[0]; i += blockDim.x*blockIdx.x) // i += 2 if ((_memory_device[i] > 0) || (_memory_device[i + 1] > 0)) support[i] = 1; // local reduction // works with small dataset - Registers issues for sure reduce<256>(support_device, support, n); return *support_device; }
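// [Editor's sketch -- countSupport16 and its parameters are illustrative names, not part of the
// Bitmaps_cuda API above.] The Count* helpers above walk the bitmap with
// "while (i < _memSizeShort_device[0])" but never advance i beyond threadIdx.x, and the templated
// reduce<>() relies on dynamic shared memory being sized at kernel launch. A minimal,
// self-contained support count in the same spirit (grid-stride loop, shared-memory tree
// reduction, one atomic per block) could look like this; it assumes blockDim.x is a power of two
// and that *support is zeroed before launch.
__global__ void countSupport16(const unsigned short* bitmap, int numShorts, int* support)
{
  extern __shared__ int partial[];                  // blockDim.x ints, sized at launch
  int local = 0;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < numShorts;
       i += blockDim.x * gridDim.x)
    local += (bitmap[i] > 0);                       // Bitmap16: one customer per short
  partial[threadIdx.x] = local;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {    // tree reduction in shared memory
    if (threadIdx.x < s) partial[threadIdx.x] += partial[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) atomicAdd(support, partial[0]);
}
// Launch sketch: countSupport16<<<blocks, threads, threads * sizeof(int)>>>(bitmap, n, d_support);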
9f7f48e6691f5eca1e8cb2dd140e7660fa92e8c1.cu
#include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <stdio.h> #include "vs2010_fix_cuda.h" #include "SeqBitmap.h" #include "Bitmaps_cuda.h" //#define __PRINT_DEBUG__ #define N 65536 #define BIG_ENDIAN 0xffff //#define __TRANSACTION_WISE__ //#define __CANDIDATE_WISE__ // Bitmap4 __device__ int _sBitmapLookupTable4_device[N]; __device__ int _countLookupTable4_device[N]; // Bitmap8 __device__ int _sBitmapLookupTable8_device[N]; __device__ int _countLookupTable8_device[N]; // Bitmap16 __device__ int _sBitmapLookupTable16_device[N]; // Bitmap32 __device__ int _sBitmapLookupTable32_device[N]; // Bitmap64 __device__ int _sBitmapLookupTable64_device[N]; // REDUCTION!! template <unsigned int blockSize> __device__ void reduce(int *g_idata, int *g_odata, unsigned int n) { extern __shared__ int sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #ifdef __PRINT_DEBUG__ // Printing Bitmaps for Debugging __global__ void printBitmap(){ //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; if(threadId >= N) return; __syncthreads(); printf("_sBitmap[%d]: %d\n", threadId, /*_sBitmapLookupTable4_device[threadId]*/); } #endif // Kernel functions for AND and OR Bitwise operations - common for all Bitmaps // Transaction-wise (need to test with Candidate-wise) __global__ void AndBitwiseOperation(unsigned int* _memory_device, const int b1_size, unsigned int* b1_memory, unsigned int* b2_memory) { // Candidate wise technique for ANDing operation #ifdef __CANDIDATE_WISE__ int i = threadIdx.x; while (i < b1_size){ _memory_device[i] = b1_memory[i] & b2_memory[i]; i += blockDim.x; } // global reduce // works with small dataset - Registers issues for sure reduce<256>(_memory_device, _memory_device, n); // after the ANDing there is the candidate generation (counting op)! #endif // Transaction wise technique for ANDing operation #ifdef __TRANSACTION_WISE__ int i = blockIdx.x * blockDim.x + threadIdx.x; int n = b1_size; while (i < b1_size){ _memory_device[i] = b1_memory[i] & b2_memory[i]; i += blockDim.x * gridDim.x; } // global reduce // works with small dataset - Registers issues for sure reduce<256>(_memory_device, _memory_device, n); // after the ANDing there is the candidate generation (counting op)! 
#else // Simple ANDing opeartion using CUDA //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= b1_size) return; //output bitwise and _memory_device[threadId] = b1_memory[threadId] & b2_memory[threadId]; #endif } // BITMAP4 __global__ void CreateSBitmap4(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable4_device[msib_device[threadId]]; } __global__ void Count4_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count4_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count4_device(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // we count the support // go thru the whole bitmap in steps of SHORT (but note that each // customer uses 4 bits) int support = 0; int i = threadIdx.x; while(i < _memSizeShort_device[0]) support += _countLookupTable4_device[ms_device[i]]; support_device[0] = support; return *support_device; } // BITMAP8 __global__ void CreateSBitmap8(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable8_device[msib_device[threadId]]; } __global__ void Count8_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count8_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count8_device(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // we count the support // go thru the whole bitmap in steps of SHORT (but note that each // customer uses 4 bits) int support = 0; int i = threadIdx.x; while(i < _memSizeShort_device[0]) support += _countLookupTable8_device[ms_device[i]]; support_device[0] = support; return *support_device; } // BITMAP16 __global__ void CreateSBitmap16(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { // NEED TO TEST WITH BIG DATASETS!! 
// we walk the memory in terms of shorts // msib: pointer to memory of ibitmap in terms of short // ms: pointer to memory as a pointer to shorts // ss: size of the memory in short //get unique block index int blockId = blockIdx.x //1D + blockIdx.y * gridDim.x //2D + gridDim.x * gridDim.y * blockIdx.z; //3D //get unique thread index int threadId = blockId * blockDim.x + threadIdx.x; //check global unique thread range if(threadId >= _memSizeShort_device[0]) return; // go thru each SHORT (note that a SHORT is 4 customers) ms_device[threadId] = _sBitmapLookupTable16_device[msib_device[threadId]]; } __global__ void Count16_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { Count16_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count16_device(unsigned short* _memory_device, int* support_device, int* _memSizeShort_device) { int support[N]; int n = _memSizeShort_device[0]; int i = threadIdx.x; // we count the support // go thru the whole bitmap in steps of SHORT while(i < _memSizeShort_device[0]) if (_memory_device[i] > 0) support[i] = 1; // reduction // works with small dataset - Registers issues for sure reduce<256>(support_device, support, n); return *support_device; } // BITMAP32 __global__ void CreateSBitmap32(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { int i = threadIdx.x; // go thru each group of 2 shorts (note that 2 shorts is one customer) for (i = 0; i < _memSizeShort_device[0]; i += blockDim.x*blockIdx.x) // i += 2 { // _sBitmapLookupTable32 should take in a short and return a short // with the first 1 changed to a // 0 and all remaining bits set to 1 if (msib_device[i + 1] > 0) { // Post-process the first short, set the other to all 1's // big endian ms_device[i + 1] = _sBitmapLookupTable32_device[msib_device[i + 1]]; ms_device[i] = BIG_ENDIAN; } else { // Set first short to 0, post-process the second ms_device[i + 1] = 0; ms_device[i] = _sBitmapLookupTable32_device[msib_device[i]]; } } } // we can call the same function as Bitmap16 __global__ void Count32_global(unsigned short* ms_device, int* support_device, int* _memSizeShort_device) { // can call the same kernel function!! 
Count16_device(ms_device, support_device, _memSizeShort_device); } // BITMAP64 __global__ void CreateSBitmap64(unsigned short* ms_device, unsigned short* msib_device, int* _memSizeShort_device) { int i = threadIdx.x; // go thru each group of 4 shorts (note that 4 shorts is one customer) for (i = 0; i < _memSizeShort_device[0]; i += blockDim.x*blockIdx.x) // i += 4 { // _sBitmapLookupTable should take in a short and return a short // with the first 1 changed to a // 0 and all remaining bits set to 1 if (msib_device[i + 1] > 0) { // Post-process the first short, set the others to all 1's ms_device[i] = USHRT_MAX; ms_device[i + 1] = _sBitmapLookupTable64_device[msib_device[i + 1]]; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = USHRT_MAX; } else if (msib_device[i] > 0) { ms_device[i] = _sBitmapLookupTable64_device[msib_device[i]]; ms_device[i + 1] = 0; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = USHRT_MAX; } else if (msib_device[i + 3] > 0) { ms_device[i] = 0; ms_device[i + 1] = 0; ms_device[i + 2] = USHRT_MAX; ms_device[i + 3] = _sBitmapLookupTable64_device[msib_device[i + 3]]; } else { // Set first 3 shorts to 0, post-process the last short ms_device[i] = 0; ms_device[i + 1] = 0; ms_device[i + 2] = _sBitmapLookupTable64_device[msib_device[i + 2]]; ms_device[i + 3] = 0; } } } __global__ void Count64_global(unsigned int* ms_device, int* support_device, int* _memSizeShort_device) { Count64_device(ms_device, support_device, _memSizeShort_device); } __device__ int Count64_device(unsigned int* _memory_device, int* support_device, int* _memSizeInt_device) { int support[N]; int n = _memSizeInt_device[0]; int i = threadIdx.x; // Go through each group of 2 ints for (i = 0; i < _memSizeInt_device[0]; i += blockDim.x*blockIdx.x) // i += 2 if ((_memory_device[i] > 0) || (_memory_device[i + 1] > 0)) support[i] = 1; // local reduction // works with small dataset - Registers issues for sure reduce<256>(support_device, support, n); return *support_device; }
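Count16_device and Count64_device (in both the .hip and .cu copies of this file) declare `int support[N]` — a 65,536-element array per thread — leave most of it uninitialized, and cannot make progress as written: Count16_device never advances `i` inside its while loop, and Count64_device strides by `blockDim.x*blockIdx.x`, which is zero for block 0 (Count4_device and Count8_device have the same missing increment). The result is then handed to reduce<256>, whose scratch space is declared `extern __shared__`, i.e. it expects dynamically sized shared memory rather than a thread-local buffer. A simpler shape that is easy to get right is a private per-thread count folded into one global counter, sketched here; the kernel name and the requirement that `d_support` be zeroed before launch are assumptions of the sketch, not of the original code.

// Hedged sketch of a support count: each thread accumulates privately over a
// grid-stride loop, then folds its count into a single global counter.
__global__ void Count16Sketch(const unsigned short* memory,
                              int memSizeShort,
                              int* d_support)      // must be zeroed before launch
{
    int i      = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    int local = 0;
    for (; i < memSizeShort; i += stride)
        if (memory[i] > 0)        // any bit set in this customer's short
            ++local;

    atomicAdd(d_support, local);  // one atomic per thread
}

A block-level reduction (e.g. cub::BlockReduce) followed by one atomicAdd per block would reduce contention further, but the per-thread atomic already shows the shape of the fix.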
3b09294a4f35c101a6f1e263ca7bafcd8a824e87.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Duane Merrill * Copyright (c) 2011-2018, NVIDIA CORPORATION * Copyright (c) 2020 Savely Pototsky (SavaLione) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /****************************************************************************** * Simple example of DeviceSelect::Flagged(). * * Selects flagged items from from a sequence of int keys using a * corresponding sequence of unsigned char flags. * * To compile using the command line: * nvcc -arch=sm_XX example_device_select_flagged.cu -I/.. 
-lcudart -O3 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <newcub/util_allocator.cuh> #include <newcub/device/device_select.cuh> // #include <newcub/test/test_util.h> #include <test_util.h> using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory //--------------------------------------------------------------------- // Test generation //--------------------------------------------------------------------- /** * Initialize problem, setting flags at distances of random length * chosen from [1..max_segment] */ void Initialize( int *h_in, unsigned char *h_flags, int num_items, int max_segment) { unsigned short max_short = (unsigned short) -1; int key = 0; int i = 0; while (i < num_items) { // Select number of repeating occurrences unsigned short repeat; RandomBits(repeat); repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); repeat = CUB_MAX(1, repeat); int j = i; while (j < CUB_MIN(i + repeat, num_items)) { h_flags[j] = 0; h_in[j] = key; j++; } h_flags[i] = 1; i = j; key++; } if (g_verbose) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("Flags:\n"); DisplayResults(h_flags, num_items); printf("\n\n"); } } /** * Solve unique problem */ int Solve( int *h_in, unsigned char *h_flags, int *h_reference, int num_items) { int num_selected = 0; for (int i = 0; i < num_items; ++i) { if (h_flags[i]) { h_reference[num_selected] = h_in[i]; num_selected++; } else { h_reference[num_items - (i - num_selected) - 1] = h_in[i]; } } return num_selected; } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { int num_items = 150; int max_segment = 40; // Maximum segment length // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("maxseg", max_segment); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--device=<device-id>] " "[--maxseg=<max segment length>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Allocate host arrays int *h_in = new int[num_items]; int *h_reference = new int[num_items]; unsigned char *h_flags = new unsigned char[num_items]; // Initialize problem and solution Initialize(h_in, h_flags, num_items, max_segment); int num_selected = Solve(h_in, h_flags, h_reference, num_items); printf("hipcub::DeviceSelect::Flagged %d items, %d selected (avg distance %d), %d-byte elements\n", num_items, num_selected, (num_selected > 0) ? 
num_items / num_selected : 0, (int) sizeof(int)); fflush(stdout); // Allocate problem device arrays int *d_in = NULL; unsigned char *d_flags = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(unsigned char) * num_items)); // Initialize device input CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(unsigned char) * num_items, hipMemcpyHostToDevice)); // Allocate device output array and num selected int *d_out = NULL; int *d_num_selected_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Run CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); // Check for correctness (and display results, if specified) int compare = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); printf("\t Data %s ", compare ? "FAIL" : "PASS"); compare |= CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); printf("\t Count %s ", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); printf("\n\n"); return 0; }
3b09294a4f35c101a6f1e263ca7bafcd8a824e87.cu
/* * Copyright (c) 2011, Duane Merrill * Copyright (c) 2011-2018, NVIDIA CORPORATION * Copyright (c) 2020 Savely Pototsky (SavaLione) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /****************************************************************************** * Simple example of DeviceSelect::Flagged(). * * Selects flagged items from from a sequence of int keys using a * corresponding sequence of unsigned char flags. * * To compile using the command line: * nvcc -arch=sm_XX example_device_select_flagged.cu -I/.. 
-lcudart -O3 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <newcub/util_allocator.cuh> #include <newcub/device/device_select.cuh> // #include <newcub/test/test_util.h> #include <test_util.h> using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory //--------------------------------------------------------------------- // Test generation //--------------------------------------------------------------------- /** * Initialize problem, setting flags at distances of random length * chosen from [1..max_segment] */ void Initialize( int *h_in, unsigned char *h_flags, int num_items, int max_segment) { unsigned short max_short = (unsigned short) -1; int key = 0; int i = 0; while (i < num_items) { // Select number of repeating occurrences unsigned short repeat; RandomBits(repeat); repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); repeat = CUB_MAX(1, repeat); int j = i; while (j < CUB_MIN(i + repeat, num_items)) { h_flags[j] = 0; h_in[j] = key; j++; } h_flags[i] = 1; i = j; key++; } if (g_verbose) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("Flags:\n"); DisplayResults(h_flags, num_items); printf("\n\n"); } } /** * Solve unique problem */ int Solve( int *h_in, unsigned char *h_flags, int *h_reference, int num_items) { int num_selected = 0; for (int i = 0; i < num_items; ++i) { if (h_flags[i]) { h_reference[num_selected] = h_in[i]; num_selected++; } else { h_reference[num_items - (i - num_selected) - 1] = h_in[i]; } } return num_selected; } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { int num_items = 150; int max_segment = 40; // Maximum segment length // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("maxseg", max_segment); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--device=<device-id>] " "[--maxseg=<max segment length>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Allocate host arrays int *h_in = new int[num_items]; int *h_reference = new int[num_items]; unsigned char *h_flags = new unsigned char[num_items]; // Initialize problem and solution Initialize(h_in, h_flags, num_items, max_segment); int num_selected = Solve(h_in, h_flags, h_reference, num_items); printf("cub::DeviceSelect::Flagged %d items, %d selected (avg distance %d), %d-byte elements\n", num_items, num_selected, (num_selected > 0) ? 
num_items / num_selected : 0, (int) sizeof(int)); fflush(stdout); // Allocate problem device arrays int *d_in = NULL; unsigned char *d_flags = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(unsigned char) * num_items)); // Initialize device input CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(unsigned char) * num_items, cudaMemcpyHostToDevice)); // Allocate device output array and num selected int *d_out = NULL; int *d_num_selected_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Run CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); // Check for correctness (and display results, if specified) int compare = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); printf("\t Data %s ", compare ? "FAIL" : "PASS"); compare |= CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); printf("\t Count %s ", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); printf("\n\n"); return 0; }
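Both versions of this example use the standard two-pass CUB calling convention: DeviceSelect::Flagged is first invoked with a null temporary-storage pointer only to have `temp_storage_bytes` filled in, the scratch buffer is then allocated, and the identical call is repeated to perform the selection. A stripped-down sketch of that pattern follows; it uses the upstream <cub/...> header path and plain cudaMalloc instead of the example's newcub/ copy and CachingDeviceAllocator, and it omits error checking. (The hipified pair above keeps the same cub:: calls and swaps the runtime calls to their hip* equivalents.)

// Minimal sketch of the two-pass DeviceSelect::Flagged pattern.
#include <cuda_runtime.h>
#include <cub/device/device_select.cuh>

void run_flagged_select(const int* d_in, const unsigned char* d_flags,
                        int* d_out, int* d_num_selected_out, int num_items)
{
    void*  d_temp_storage     = nullptr;
    size_t temp_storage_bytes = 0;

    // Pass 1: no work is done; only temp_storage_bytes is written.
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               d_in, d_flags, d_out, d_num_selected_out,
                               num_items);

    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Pass 2: the actual flagged selection.
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               d_in, d_flags, d_out, d_num_selected_out,
                               num_items);

    cudaFree(d_temp_storage);
}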
ce1e041bab59dd96465b13d5acd94b5ab0ca34bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Preprocessing.h" static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void cuda_normalization(float *data, int rows, int columns) { int total_threads_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; int min, max; float max_float = FLT_MAX; for (int i = tid+1; i < columns; i=i+total_threads_count) { min = max_float; max = 0; for (int j = 0; j < rows; ++j) { if (*(data + (j*columns)+i) < min) { min = *(data + (j*columns)+i); } else if (*(data + (j*columns)+i) > max) { max = *(data + (j*columns)+i); } } float max_min_reciprocal = max - min; if (max_min_reciprocal == 0) { continue; } max_min_reciprocal = 1. / max_min_reciprocal; for (int j = 0; j < rows; ++j) { *(data + (j*columns)+i) = (*(data + (j*columns)+i) - min) * max_min_reciprocal; } } } __global__ void cuda_standarization(float *data, int rows, int columns) { int total_threads_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; float var, ave, amo; for (int i = tid+1; i < columns; i=i+total_threads_count) { amo = 0, var = 0; for (int j = 0; j < rows; ++j) { amo = amo + *(data + (j * columns) + i); } ave = amo / float(rows); for (int j = 0; j < rows; ++j) { float factor = *(data + (j * columns) + i) - ave; var = var + (factor * factor); } if (var == 0) { for (int j = 0; j < rows; j++) { *(data + (j * columns) + i) = *(data + (j * columns) + i) / 255.; } continue; } float sd_reciprocal = 1./sqrt(var); for (int j = 0; j < rows; j++) { *(data + (j * columns) + i) = (*(data + (j * columns) + i) - ave) * sd_reciprocal; } } } Preprocessing::Preprocessing() { } Preprocessing::~Preprocessing() { } void Preprocessing::Normalization(float *data, int rows, int columns, int threads_count_per_block, int blocks_count) { hipDeviceProp_t cuda_properties; // information about gpu HANDLE_ERROR(hipGetDeviceProperties( &cuda_properties, 0)); // copy data to compute from RAM into gpu device memory float *cuda_data; int data_size = sizeof(float) * rows * columns; HANDLE_ERROR(hipMalloc((void**)&cuda_data, data_size)); HANDLE_ERROR(hipMemcpy(cuda_data, data, data_size, hipMemcpyHostToDevice)); // measure time using cuda events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // standarize hipLaunchKernelGGL(( cuda_normalization), dim3(blocks_count), dim3(threads_count_per_block), 0, 0, cuda_data, rows, columns); hipEventRecord(stop); // copy computed data from gpu device memory to host RAM HANDLE_ERROR(hipMemcpy(data, cuda_data, data_size, hipMemcpyDeviceToHost)); // print elapsed time hipEventSynchronize(stop); float elapsed_time = 0; hipEventElapsedTime(&elapsed_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("Czas obliczen normalizacja: %f\n", elapsed_time/1000); } void Preprocessing::Standarization(float *data, int rows, int columns, int threads_count_per_block, int blocks_count) { hipDeviceProp_t cuda_properties; // information about gpu HANDLE_ERROR(hipGetDeviceProperties( &cuda_properties, 0)); // copy data to compute into gpu device memory float *cuda_data; int data_size = sizeof(float) * rows * columns; HANDLE_ERROR(hipMalloc((void**)&cuda_data, data_size)); HANDLE_ERROR(hipMemcpy(cuda_data, data, 
data_size, hipMemcpyHostToDevice)); // measure time using cuda events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // standarize hipLaunchKernelGGL(( cuda_standarization), dim3(blocks_count), dim3(threads_count_per_block), 0, 0, cuda_data, rows, columns); hipEventRecord(stop); // copy computed data from gpu device memory to host RAM HANDLE_ERROR(hipMemcpy(data, cuda_data, data_size, hipMemcpyDeviceToHost)); // print elapsed time hipEventSynchronize(stop); float elapsed_time = 0; hipEventElapsedTime(&elapsed_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("Czas obliczen standaryzacja: %f\n", elapsed_time/1000); }
ce1e041bab59dd96465b13d5acd94b5ab0ca34bb.cu
#include "Preprocessing.h" static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void cuda_normalization(float *data, int rows, int columns) { int total_threads_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; int min, max; float max_float = FLT_MAX; for (int i = tid+1; i < columns; i=i+total_threads_count) { min = max_float; max = 0; for (int j = 0; j < rows; ++j) { if (*(data + (j*columns)+i) < min) { min = *(data + (j*columns)+i); } else if (*(data + (j*columns)+i) > max) { max = *(data + (j*columns)+i); } } float max_min_reciprocal = max - min; if (max_min_reciprocal == 0) { continue; } max_min_reciprocal = 1. / max_min_reciprocal; for (int j = 0; j < rows; ++j) { *(data + (j*columns)+i) = (*(data + (j*columns)+i) - min) * max_min_reciprocal; } } } __global__ void cuda_standarization(float *data, int rows, int columns) { int total_threads_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; float var, ave, amo; for (int i = tid+1; i < columns; i=i+total_threads_count) { amo = 0, var = 0; for (int j = 0; j < rows; ++j) { amo = amo + *(data + (j * columns) + i); } ave = amo / float(rows); for (int j = 0; j < rows; ++j) { float factor = *(data + (j * columns) + i) - ave; var = var + (factor * factor); } if (var == 0) { for (int j = 0; j < rows; j++) { *(data + (j * columns) + i) = *(data + (j * columns) + i) / 255.; } continue; } float sd_reciprocal = 1./sqrt(var); for (int j = 0; j < rows; j++) { *(data + (j * columns) + i) = (*(data + (j * columns) + i) - ave) * sd_reciprocal; } } } Preprocessing::Preprocessing() { } Preprocessing::~Preprocessing() { } void Preprocessing::Normalization(float *data, int rows, int columns, int threads_count_per_block, int blocks_count) { cudaDeviceProp cuda_properties; // information about gpu HANDLE_ERROR(cudaGetDeviceProperties( &cuda_properties, 0)); // copy data to compute from RAM into gpu device memory float *cuda_data; int data_size = sizeof(float) * rows * columns; HANDLE_ERROR(cudaMalloc((void**)&cuda_data, data_size)); HANDLE_ERROR(cudaMemcpy(cuda_data, data, data_size, cudaMemcpyHostToDevice)); // measure time using cuda events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // standarize cuda_normalization<<<blocks_count, threads_count_per_block>>>(cuda_data, rows, columns); cudaEventRecord(stop); // copy computed data from gpu device memory to host RAM HANDLE_ERROR(cudaMemcpy(data, cuda_data, data_size, cudaMemcpyDeviceToHost)); // print elapsed time cudaEventSynchronize(stop); float elapsed_time = 0; cudaEventElapsedTime(&elapsed_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("Czas obliczen normalizacja: %f\n", elapsed_time/1000); } void Preprocessing::Standarization(float *data, int rows, int columns, int threads_count_per_block, int blocks_count) { cudaDeviceProp cuda_properties; // information about gpu HANDLE_ERROR(cudaGetDeviceProperties( &cuda_properties, 0)); // copy data to compute into gpu device memory float *cuda_data; int data_size = sizeof(float) * rows * columns; HANDLE_ERROR(cudaMalloc((void**)&cuda_data, data_size)); HANDLE_ERROR(cudaMemcpy(cuda_data, data, data_size, cudaMemcpyHostToDevice)); // measure time using cuda events cudaEvent_t start, stop; 
cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // standarize cuda_standarization<<<blocks_count, threads_count_per_block>>>(cuda_data, rows, columns); cudaEventRecord(stop); // copy computed data from gpu device memory to host RAM HANDLE_ERROR(cudaMemcpy(data, cuda_data, data_size, cudaMemcpyDeviceToHost)); // print elapsed time cudaEventSynchronize(stop); float elapsed_time = 0; cudaEventElapsedTime(&elapsed_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("Czas obliczen standaryzacja: %f\n", elapsed_time/1000); }
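In cuda_normalization (both the .hip and .cu versions above), `min` and `max` are declared as `int` while the data is `float`: `max_float` (FLT_MAX) cannot be represented in an int, every update truncates the fractional part, and because `max` starts at 0 and is only touched in an `else if`, an all-negative column keeps `max == 0`. The sketch below keeps the column-per-thread layout but tracks the extremes as floats; the kernel name is illustrative. (Separately, cuda_standarization takes `sqrt(var)` over the raw sum of squared deviations — if a true standard deviation is intended, that sum should be divided by the row count first.)

#include <cfloat>   // FLT_MAX

// Hedged sketch of the normalization kernel with float min/max tracking.
// Column 0 is skipped, matching the original loop's `tid + 1` start.
__global__ void cuda_normalization_sketch(float* data, int rows, int columns)
{
    int total_threads = blockDim.x * gridDim.x;
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    for (int i = tid + 1; i < columns; i += total_threads) {
        float col_min =  FLT_MAX;
        float col_max = -FLT_MAX;
        for (int j = 0; j < rows; ++j) {
            float v = data[j * columns + i];
            col_min = fminf(col_min, v);
            col_max = fmaxf(col_max, v);
        }
        float range = col_max - col_min;
        if (range == 0.0f) continue;           // constant column: leave as-is
        float inv = 1.0f / range;
        for (int j = 0; j < rows; ++j)
            data[j * columns + i] = (data[j * columns + i] - col_min) * inv;
    }
}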
9d29460a58003647539f2b8cc437a1eea6149acf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/comp/nvcomp_adapter.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/utilities/bit.hpp> #include <hipcub/hipcub.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/tuple.h> namespace cudf { namespace io { namespace orc { namespace gpu { using cudf::detail::device_2dspan; constexpr int scratch_buffer_size = 512 * 4; constexpr int compact_streams_block_size = 1024; // Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2 // Workaround replaces zero-length patch lists by a dummy zero patch constexpr bool zero_pll_war = true; struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; volatile uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { uint32_t literal_run; uint32_t delta_run; uint32_t literal_mode; uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; volatile uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { uint32_t char_count; uint32_t lengths_red[(512 / 32)]; const char* str_data[512]; }; struct orcenc_state_s { uint32_t cur_row; // Current row in group uint32_t present_rows; // # of rows in present buffer uint32_t present_out; // # of rows in present buffer that have been flushed uint32_t nrows; // # of rows in current batch uint32_t numvals; // # of non-zero values in current batch (<=nrows) uint32_t numlengths; // # of non-zero values in DATA2 batch uint32_t nnz; // Running count of non-null values encoder_chunk_streams stream; EncChunk chunk; uint32_t strm_pos[CI_NUM_STREAMS]; uint8_t valid_buf[512]; // valid map bits union { byterle_enc_state_s byterle; intrle_enc_state_s intrle; strdata_enc_state_s strenc; StripeDictionary dict_stripe; } u; union { uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer uint32_t u32[scratch_buffer_size / 4]; } buf; union { uint8_t u8[2048]; uint32_t u32[1024]; int32_t i32[1024]; uint64_t u64[1024]; int64_t i64[1024]; } vals; union { uint8_t u8[2048]; uint32_t u32[1024]; uint64_t u64[1024]; } lengths; }; static inline __device__ uint32_t zigzag(uint32_t v) { return v; } static inline __device__ uint32_t zigzag(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; } static inline __device__ uint64_t zigzag(uint64_t v) { return v; } static inline __device__ uint64_t zigzag(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ __uint128_t zigzag(__int128_t v) { int64_t s = (v < 0) ? 
1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; } static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; } /** * @brief Raw data output * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] count number of bytes to encode * @param[in] t thread id */ template <StreamIndexType cid, uint32_t inmask> static __device__ void StoreBytes( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t count, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; while (count > 0) { uint32_t n = min(count, 512); if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; } dst += n; inpos += n; count -= n; } __syncthreads(); if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } } /** * @brief ByteRLE encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * * @return number of input values encoded */ template <StreamIndexType cid, uint32_t inmask> static __device__ uint32_t ByteRLE( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; while (numvals > 0) { uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512); if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map; __syncthreads(); if (t == 0) { // Find the start of an identical 3-byte sequence // TBD: The two loops below could be eliminated using more ballot+ffs using warp0 literal_run = 0; repeat_run = 0; while (literal_run < maxvals) { uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1]; uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1); if (mask) { uint32_t literal_run_ofs = __ffs(mask) - 1; literal_run += literal_run_ofs; repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1)); if (repeat_run + literal_run_ofs == 32) { while (next == ~0) { uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1; next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0; repeat_run += 32; } repeat_run += __ffs(~next) - 1; } repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals)); if (repeat_run < 3) { literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0; repeat_run = 0; } break; } rpt_map = next; literal_run += 32; } if (repeat_run >= 130) { // Limit large runs to multiples of 130 repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 
2 * 130 : 130; } else if (literal_run && literal_run + repeat_run == maxvals) { repeat_run = 0; // Try again at next iteration } s->u.byterle.repeat_run = repeat_run; s->u.byterle.literal_run = min(literal_run, maxvals); } __syncthreads(); literal_run = s->u.byterle.literal_run; if (!flush && literal_run == numvals) { literal_run &= ~0x7f; if (!literal_run) break; } if (literal_run > 0) { uint32_t num_runs = (literal_run + 0x7f) >> 7; if (t < literal_run) { uint32_t run_id = t >> 7; uint32_t run = min(literal_run - run_id * 128, 128); if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run; dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += num_runs + literal_run; out_cnt += literal_run; numvals -= literal_run; inpos += literal_run; } repeat_run = s->u.byterle.repeat_run; if (repeat_run > 0) { while (repeat_run >= 130) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = 0x7f; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += 130; numvals -= 130; inpos += 130; repeat_run -= 130; } if (!flush && repeat_run == numvals) { // Wait for more data in case we can continue the run later break; } if (repeat_run >= 3) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = repeat_run - 3; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += repeat_run; numvals -= repeat_run; inpos += repeat_run; } } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } return out_cnt; } /** * @brief Maps the symbol size in bytes to RLEv2 5-bit length code */ static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = { 0, 7, 15, 23, 27, 28, 29, 30, 31}; /** * @brief Encode a varint value, return the number of bytes written */ static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v) { uint32_t bytecnt = 0; for (;;) { auto c = static_cast<uint32_t>(v & 0x7f); v >>= 7u; if (v == 0) { dst[bytecnt++] = c; break; } else { dst[bytecnt++] = c + 0x80; } } return bytecnt; } template <class T> static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w) { for (uint32_t i = 0, b = w * 8; i < w; ++i) { b -= 8; dst[i] = static_cast<uint8_t>(v >> b); } } // Combine and store bits for symbol widths less than 8 static inline __device__ void StoreBitsBigEndian( uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t) { if (t <= (num_vals | 0x1f)) { uint32_t mask; if (w <= 1) { v = (v << 1) | (shuffle_xor(v, 1) & 0x1); v = (v << 2) | (shuffle_xor(v, 2) & 0x3); v = (v << 4) | (shuffle_xor(v, 4) & 0xf); mask = 0x7; } else if (w <= 2) { v = (v << 2) | (shuffle_xor(v, 1) & 0x3); v = (v << 4) | (shuffle_xor(v, 2) & 0xf); mask = 0x3; } else // if (w <= 4) { v = (v << 4) | (shuffle_xor(v, 1) & 0xf); mask = 0x1; } if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); } } } /** * @brief Integer RLEv2 encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * @param[in] temp_storage shared memory storage to perform block reduce * * @return number of input values encoded */ template <StreamIndexType cid, class T, bool is_signed, uint32_t inmask, int block_size, typename Storage> 
static __device__ uint32_t IntegerRLE( orcenc_state_s* s, const T* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage) { using block_reduce = hipcub::BlockReduce<T, block_size>; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; __shared__ volatile uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0; uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run; if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map; __syncthreads(); if (!t) { // Find the start of the next delta run (2 consecutive values with the same delta) literal_run = delta_run = 0; while (literal_run < maxvals) { if (delta_map != 0) { uint32_t literal_run_ofs = __ffs(delta_map) - 1; literal_run += literal_run_ofs; delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1)); if (literal_run_ofs + delta_run == 32) { for (;;) { uint32_t delta_idx = (literal_run + delta_run) >> 5; delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0; if (delta_map != ~0) break; delta_run += 32; } delta_run += __ffs(~delta_map) - 1; } delta_run += 2; break; } literal_run += 32; delta_map = s->u.intrle.delta_map[(literal_run >> 5)]; } literal_run = min(literal_run, maxvals); s->u.intrle.literal_run = literal_run; s->u.intrle.delta_run = min(delta_run, maxvals - literal_run); } __syncthreads(); literal_run = s->u.intrle.literal_run; // Find minimum and maximum values if (literal_run > 0) { // Find min & max T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max(); T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min(); uint32_t literal_mode, literal_w; vmin = block_reduce(temp_storage).Reduce(vmin, hipcub::Min()); __syncthreads(); vmax = block_reduce(temp_storage).Reduce(vmax, hipcub::Max()); if (t == 0) { uint32_t mode1_w, mode2_w; typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2; block_vmin = static_cast<uint64_t>(vmin); if constexpr (sizeof(T) > 4) { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7); mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7); } else { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3); mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3); } // Decide between mode1 & mode2 (also mode3 for length=2 repeat) if (vrange_mode2 == 0 && mode1_w > 1) { // Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= // 3) uint32_t bytecnt = 2; dst[0] = 0xC0 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; bytecnt += StoreVarint(dst + 2, vrange_mode1); dst[bytecnt++] = 0; // Zero delta s->u.intrle.literal_mode = 3; s->u.intrle.literal_w = bytecnt; } else { uint32_t range, w; // Mode 2 base value cannot be bigger than max int64_t, i.e. the first bit has to be 0 if (vmin <= std::numeric_limits<int64_t>::max() and mode1_w > mode2_w and (literal_run - 1) * (mode1_w - mode2_w) > 4) { s->u.intrle.literal_mode = 2; w = mode2_w; range = (uint32_t)vrange_mode2; } else { s->u.intrle.literal_mode = 1; w = mode1_w; range = (uint32_t)vrange_mode1; } if (w == 1) w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 
2 : 1; else w <<= 3; // bytes -> bits s->u.intrle.literal_w = w; } } __syncthreads(); vmin = static_cast<T>(block_vmin); literal_mode = s->u.intrle.literal_mode; literal_w = s->u.intrle.literal_w; if (literal_mode == 1) { // Direct mode if (!t) { dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; } dst += 2; typename std::make_unsigned<T>::type zzv0 = v0; if (t < literal_run) { zzv0 = zigzag(v0); } if (literal_w < 8) { StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t); } else if (t < literal_run) { StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3)); } } else if (literal_mode == 2) { // Patched base mode if (!t) { uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1; vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin; bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3)); if (zero_pll_war) { // Insert a dummy zero patch pll = 1; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0; } else { pll = 0; } dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw]; dst[3] = ((pgw - 1) << 5) | pll; if (is_signed) { vmax >>= 1; vmax |= vmin & ((T)1 << (bw * 8 - 1)); } StoreBytesBigEndian(dst + 4, vmax, bw); s->u.intrle.hdr_bytes = 4 + bw; s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3; } __syncthreads(); dst += s->u.intrle.hdr_bytes; v0 -= (t < literal_run) ? vmin : 0; if (literal_w < 8) StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t); else if (t < literal_run) StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3)); dst += s->u.intrle.pl_bytes; } else { // Delta mode dst += literal_w; literal_w = 0; } dst += (literal_run * literal_w + 7) >> 3; numvals -= literal_run; inpos += literal_run; out_cnt += literal_run; __syncthreads(); } delta_run = s->u.intrle.delta_run; if (delta_run > 0) { if (t == literal_run) { int64_t delta = (int64_t)v1 - (int64_t)v0; uint64_t delta_base = zigzag(v0); if (delta == 0 && delta_run >= 3 && delta_run <= 10) { // Short repeat uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7); dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3); for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) { b -= 8; dst[1 + i] = static_cast<uint8_t>(delta_base >> b); } s->u.intrle.hdr_bytes = 1 + delta_bw; } else { // Delta uint64_t delta_u = zigzag(delta); uint32_t bytecnt = 2; dst[0] = 0xC0 + ((delta_run - 1) >> 8); dst[1] = (delta_run - 1) & 0xff; bytecnt += StoreVarint(dst + bytecnt, delta_base); bytecnt += StoreVarint(dst + bytecnt, delta_u); s->u.intrle.hdr_bytes = bytecnt; } } __syncthreads(); dst += s->u.intrle.hdr_bytes; numvals -= delta_run; inpos += delta_run; out_cnt += delta_run; } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } __syncthreads(); return out_cnt; } /** * @brief Store a group of strings as a single concatenated string * * @param[in] dst destination buffer * @param[in] strenc string encoder state * @param[in] len(t) string length (per thread) * @param[in] t thread id */ static __device__ void StoreStringData(uint8_t* dst, strdata_enc_state_s* strenc, uint32_t len, int t) { // Start with summing up all the lengths uint32_t pos = len; 
uint32_t wt = t & 0x1f; for (uint32_t n = 1; n < 32; n <<= 1) { uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1)); pos += (wt & n) ? tmp : 0; } if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; } dst += pos - len; __syncthreads(); if (t < 32) { uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0; uint32_t wpos = wlen; for (uint32_t n = 1; n < 16; n <<= 1) { uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1)); wpos += (wt & n) ? tmp : 0; } if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; } if (wt == 0xf) { strenc->char_count = wpos; // Update stream position } } __syncthreads(); // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive character at a time // rather than have each thread to a memcpy if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); } } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id */ template <class T> inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } template <int block_size, typename Storage> static __device__ void encode_null_mask(orcenc_state_s* s, bitmask_type const* pushdown_mask, Storage& scan_storage, int t) { if (s->stream.ids[CI_PRESENT] < 0) return; auto const column = *s->chunk.column; while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) { // Number of rows read so far auto present_rows = s->present_rows; // valid_buf capacity is byte per thread in block auto const buf_available_bits = encode_block_size * 8 - s->numvals; // Number of rows for the block to process in this iteration auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits); // Number of rows for this thread to process in this iteration auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8); auto const row = s->chunk.null_mask_start_row + present_rows + t * 8; auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t { if (t_nrows == 0) return 0; if (mask == nullptr) return 0xff; auto const begin_offset = row + offset; auto const end_offset = min(begin_offset + 8, offset + column.size()); auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset); return mask_word & 0xff; }; uint8_t pd_byte = (1 << t_nrows) - 1; uint32_t pd_set_cnt = t_nrows; uint32_t offset = t_nrows != 0 ? 
t * 8 : nrows; if (pushdown_mask != nullptr) { pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1); pd_set_cnt = __popc(pd_byte); // Scan the number of valid bits to get dst offset for each thread hipcub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset); } auto const mask_byte = get_mask_byte(column.null_mask(), column.offset()); auto dst_offset = offset + s->nnz; auto vbuf_bit_idx = [](int row) { // valid_buf is a circular buffer with validity of 8 rows in each element return row % (encode_block_size * 8); }; if (dst_offset % 8 == 0 and pd_set_cnt == 8) { s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte; } else { for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) { // skip bits where pushdown mask is not set if (not(pd_byte & (1 << bit_idx))) continue; if (mask_byte & (1 << bit_idx)) { set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } else { clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } } } __syncthreads(); if (t == block_size - 1) { // Number of loaded rows, available for encode s->numvals += offset + pd_set_cnt; // Number of loaded rows (different from present_rows because of pushdown masks) s->nnz += offset + pd_set_cnt; } present_rows += nrows; if (!t) { s->present_rows = present_rows; } __syncthreads(); // RLE encode the present stream if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) { auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7; auto const nbytes_out = (s->numvals + flush) / 8; auto const nrows_encoded = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8; if (!t) { // Number of rows encoded so far s->present_out += nrows_encoded; s->numvals -= min(s->numvals, nrows_encoded); } __syncthreads(); } } // reset shared state if (t == 0) { s->nnz = 0; } } /** * @brief Encode column data * * @param[in] chunks encoder chunks device array [column][rowgroup] * @param[in, out] streams chunk streams device array [column][rowgroup] */ // blockDim {`encode_block_size`,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ union { typename hipcub::BlockScan<uint32_t, block_size>::TempStorage scan_u32; typename hipcub::BlockReduce<int32_t, block_size>::TempStorage i32; typename hipcub::BlockReduce<int64_t, block_size>::TempStorage i64; typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage u32; typename hipcub::BlockReduce<uint64_t, block_size>::TempStorage u64; } temp_storage; orcenc_state_s* const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[col_id][group_id]; s->stream = streams[col_id][group_id]; s->cur_row = 0; s->present_rows = 0; s->present_out = 0; s->numvals = 0; s->numlengths = 0; s->nnz = 0; s->strm_pos[CI_DATA] = 0; s->strm_pos[CI_PRESENT] = 0; s->strm_pos[CI_INDEX] = 0; // Dictionary data is encoded in a separate kernel s->strm_pos[CI_DATA2] = s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0; s->strm_pos[CI_DICTIONARY] = s->chunk.encoding_kind == DICTIONARY_V2 ? 
s->stream.lengths[CI_DICTIONARY] : 0; } __syncthreads(); auto const pushdown_mask = [&]() -> cudf::bitmask_type const* { auto const parent_index = s->chunk.column->parent_index; if (!parent_index.has_value()) return nullptr; return chunks[parent_index.value()][0].column->pushdown_mask; }(); encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t); __syncthreads(); auto const column = *s->chunk.column; while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) { // Fetch non-null values auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP; if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) { // Pass-through __syncthreads(); if (!t) { s->cur_row = s->chunk.num_rows; s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len; } } else if (s->cur_row < s->chunk.num_rows) { uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024; uint32_t nrows = min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), encode_block_size); auto const row = s->chunk.start_row + s->cur_row + t; auto const is_value_valid = [&]() { if (t >= nrows) return false; return bit_value_or(pushdown_mask, column.offset() + row, true) and bit_value_or(column.null_mask(), column.offset() + row, true); }(); s->buf.u32[t] = is_value_valid ? 1u : 0u; // TODO: Could use a faster reduction relying on _popc() for the initial phase lengths_to_positions(s->buf.u32, encode_block_size, t); __syncthreads(); if (is_value_valid) { int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1); switch (s->chunk.type_kind) { case INT: case DATE: case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break; case DOUBLE: case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break; case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break; case BOOLEAN: case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break; case TIMESTAMP: { int64_t ts = column.element<int64_t>(row); int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)]; int64_t seconds = ts / ts_scale; int64_t nanos = (ts - seconds * ts_scale); s->vals.i64[nz_idx] = seconds - orc_utc_epoch; if (nanos != 0) { // Trailing zeroes are encoded in the lower 3-bits uint32_t zeroes = 0; nanos *= powers_of_ten[min(s->chunk.scale, 9)]; if (!(nanos % 100)) { nanos /= 100; zeroes = 1; while (zeroes < 7 && !(nanos % 10)) { nanos /= 10; zeroes++; } } nanos = (nanos << 3) + zeroes; } s->lengths.u64[nz_idx] = nanos; break; } case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { uint32_t dict_idx = s->chunk.dict_index[row]; if (dict_idx > 0x7fff'ffffu) { dict_idx = s->chunk.dict_index[dict_idx & 0x7fff'ffffu]; } s->vals.u32[nz_idx] = dict_idx; } else { string_view value = column.element<string_view>(row); s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data(); s->lengths.u32[nz_idx] = value.size_bytes(); } break; // Reusing the lengths array for the scale stream // Note: can be written in a faster manner, given that all values are equal case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break; case LIST: case MAP: { auto const& offsets = column.child(lists_column_view::offsets_column_index); // Compute list length from the offsets s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) - offsets.element<size_type>(row + column.offset()); } break; default: break; } } __syncthreads(); if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) { // Store string data uint32_t nz 
= s->buf.u32[511]; uint32_t nz_idx = (s->nnz + t) & 0x3ff; uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0; StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t); if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; } __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { // bool8 -> 8x bool1 uint32_t nz = s->buf.u32[511]; uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3; if (t < n) { uint32_t idx8 = (s->nnz & ~7) + (t << 3); s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) | ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) | ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) | ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) | ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) | ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) | ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) | ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0); } __syncthreads(); } if (!t) { uint32_t nz = s->buf.u32[511]; s->nnz += nz; s->numvals += nz; s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == LIST || s->chunk.type_kind == MAP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0; s->cur_row += nrows; } __syncthreads(); // Encode values if (s->numvals > 0) { uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n; switch (s->chunk.type_kind) { case SHORT: case INT: case DATE: n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>( s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32); break; case LONG: case TIMESTAMP: n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>( s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64); break; case BYTE: n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t); break; case BOOLEAN: n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8; break; case FLOAT: StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t); n = s->numvals; break; case DOUBLE: StoreBytes<CI_DATA, 0x1fff>( s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t); n = s->numvals; break; case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>( s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32); } else { n = s->numvals; } break; case DECIMAL: { if (is_value_valid) { auto const id = column.type().id(); __uint128_t const zz_val = id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row)) : id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row)) : zigzag(column.element<__int128_t>(row)); auto const offset = (row == s->chunk.start_row) ? 
0 : s->chunk.decimal_offsets[row - 1]; StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val); } n = s->numvals; } break; default: n = s->numvals; break; } __syncthreads(); if (!t) { s->numvals -= min(n, s->numvals); } } // Encode secondary stream values if (s->numlengths > 0) { uint32_t n; switch (s->chunk.type_kind) { case TIMESTAMP: n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>( s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64); break; case DECIMAL: case LIST: case MAP: case STRING: n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32); break; default: n = s->numlengths; break; } __syncthreads(); if (!t) { s->numlengths -= min(n, s->numlengths); } } } __syncthreads(); } __syncthreads(); if (t <= CI_PRESENT && s->stream.ids[t] >= 0) { // Update actual compressed length // (not needed for decimal data, whose exact size is known before encode) if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL)) streams[col_id][group_id].lengths[t] = s->strm_pos[t]; if (!s->stream.data_ptrs[t]) { streams[col_id][group_id].data_ptrs[t] = static_cast<uint8_t*>(const_cast<void*>(column.head())) + (column.offset() + s->chunk.start_row) * s->chunk.dtype_len; } } } /** * @brief Encode column dictionaries * * @param[in] stripes Stripe dictionaries device array [stripe][string_column] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeStringDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage; orcenc_state_s* const s = &state_g; uint32_t stripe_id = blockIdx.x; uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2; int t = threadIdx.x; if (t == 0) s->u.dict_stripe = stripes[stripe_id]; __syncthreads(); auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; if (t == 0) { s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; s->stream = *strm_ptr; s->strm_pos[cid] = 0; s->numlengths = 0; s->nrows = s->u.dict_stripe.num_strings; s->cur_row = 0; } auto const string_column = s->u.dict_stripe.leaf_column; auto const dict_data = s->u.dict_stripe.dict_data; __syncthreads(); if (s->chunk.encoding_kind != DICTIONARY_V2) { return; // This column isn't using dictionary encoding -> bail out } while (s->cur_row < s->nrows || s->numlengths != 0) { uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512)); uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0; if (cid == CI_DICTIONARY) { // Encoding string contents const char* ptr = nullptr; uint32_t count = 0; if (t < numvals) { auto string_val = string_column->element<string_view>(string_idx); ptr = string_val.data(); count = string_val.size_bytes(); } s->u.strenc.str_data[t] = ptr; StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t); if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; } } else { // Encoding string lengths uint32_t count = (t < numvals) ? 
static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes()) : 0; uint32_t nz_idx = (s->cur_row + t) & 0x3ff; if (t < numvals) s->lengths.u32[nz_idx] = count; __syncthreads(); if (s->numlengths + numvals > 0) { uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage); __syncthreads(); if (!t) { s->numlengths += numvals; s->numlengths -= min(n, s->numlengths); } } } if (t == 0) { s->cur_row += numvals; } __syncthreads(); } if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; } } /** * @brief Merge chunked column data into a single contiguous stream * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in,out] streams List of encoder chunk streams [column][rowgroup] */ // blockDim {compact_streams_block_size,1,1} __global__ void __launch_bounds__(compact_streams_block_size) gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) StripeStream ss; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; auto const t = threadIdx.x; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; } __syncthreads(); if (ss.data_ptr == nullptr) { return; } auto const cid = ss.stream_type; auto dst_ptr = ss.data_ptr; for (auto group = ss.first_chunk_id; group < ss.first_chunk_id + ss.num_chunks; ++group) { auto const len = streams[ss.column_id][group].lengths[cid]; if (len > 0) { auto const src_ptr = streams[ss.column_id][group].data_ptrs[cid]; for (uint32_t i = t; i < len; i += blockDim.x) { dst_ptr[i] = src_ptr[i]; } __syncthreads(); } if (t == 0) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; } dst_ptr += len; } } /** * @brief Initializes compression input/output structures * * @param[in] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] results Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression * @param[in] comp_block_align Required alignment for compressed blocks */ // blockDim {256,1,1} __global__ void __launch_bounds__(256) gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc, device_2dspan<encoder_chunk_streams> streams, // const? device_span<device_span<uint8_t const>> inputs, device_span<device_span<uint8_t>> outputs, device_span<compression_result> results, device_span<uint8_t> compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size, uint32_t comp_block_align) { __shared__ __align__(16) StripeStream ss; __shared__ uint8_t* volatile uncomp_base_g; auto const padded_block_header_size = util::round_up_unsafe(block_header_size, comp_block_align); auto const padded_comp_block_size = util::round_up_unsafe(max_comp_blk_size, comp_block_align); auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks; uint8_t *src, *dst; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type]; } __syncthreads(); src = uncomp_base_g; dst = compressed_bfr.data() + ss.bfr_offset; num_blocks = (ss.stream_size > 0) ? 
(ss.stream_size - 1) / comp_blk_size + 1 : 1; for (uint32_t b = t; b < num_blocks; b += 256) { uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); inputs[ss.first_block + b] = {src + b * comp_blk_size, blk_size}; auto const dst_offset = padded_block_header_size + b * (padded_block_header_size + padded_comp_block_size); outputs[ss.first_block + b] = {dst + dst_offset, max_comp_blk_size}; results[ss.first_block + b] = {0, compression_status::FAILURE}; } } /** * @brief Compacts compressed blocks in a single contiguous stream, and update 3-byte block length *fields * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] results Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc, device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, device_span<uint8_t> compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ const uint8_t* volatile comp_src_g; __shared__ uint32_t volatile comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks, b, blk_size; const uint8_t* src; uint8_t* dst; if (t == 0) ss = strm_desc[stripe_id][stream_id]; __syncthreads(); num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0; dst = compressed_bfr.data() + ss.bfr_offset; b = 0; do { if (t == 0) { auto const src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); auto dst_len = (results[ss.first_block + b].status == compression_status::SUCCESS) ? results[ss.first_block + b].bytes_written : src_len; uint32_t blk_size24{}; // Only use the compressed block if it's smaller than the uncompressed // If compression failed, dst_len == src_len, so the uncompressed block will be used if (src_len < dst_len) { // Copy from uncompressed source src = inputs[ss.first_block + b].data(); results[ss.first_block + b].bytes_written = src_len; dst_len = src_len; blk_size24 = dst_len * 2 + 1; } else { // Compressed block src = outputs[ss.first_block + b].data(); blk_size24 = dst_len * 2 + 0; } dst[0] = static_cast<uint8_t>(blk_size24 >> 0); dst[1] = static_cast<uint8_t>(blk_size24 >> 8); dst[2] = static_cast<uint8_t>(blk_size24 >> 16); comp_src_g = src; comp_len_g = dst_len; } __syncthreads(); src = comp_src_g; blk_size = comp_len_g; dst += 3; // skip over length written by thread0 if (src != dst) { for (uint32_t i = 0; i < blk_size; i += 1024) { uint8_t v = (i + t < blk_size) ? 
src[i + t] : 0; __syncthreads(); if (i + t < blk_size) { dst[i + t] = v; } } } dst += blk_size; __syncthreads(); } while (++b < num_blocks); // Update stripe stream with the compressed size if (t == 0) { strm_desc[stripe_id][stream_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr.data() + ss.bfr_offset)); } } void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams, rmm::cuda_stream_view stream) { dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk dim3 dim_grid(chunks.size().first, chunks.size().second); hipLaunchKernelGGL(( gpuEncodeOrcColumnData<encode_block_size>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, streams); } void EncodeStripeDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, uint32_t num_string_columns, uint32_t num_stripes, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per dictionary dim3 dim_grid(num_string_columns * num_stripes, 2); hipLaunchKernelGGL(( gpuEncodeStringDictionaries<512>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), stripes, chunks, enc_streams); } void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(compact_streams_block_size, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); hipLaunchKernelGGL(( gpuCompactOrcDataStreams), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_desc, enc_streams); } std::optional<writer_compression_statistics> CompressOrcDataStreams( device_span<uint8_t> compressed_data, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, uint32_t max_comp_blk_size, uint32_t comp_block_align, bool collect_statistics, device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, device_span<compression_result> comp_res, rmm::cuda_stream_view stream) { rmm::device_uvector<device_span<uint8_t const>> comp_in(num_compressed_blocks, stream); rmm::device_uvector<device_span<uint8_t>> comp_out(num_compressed_blocks, stream); dim3 dim_block_init(256, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); hipLaunchKernelGGL(( gpuInitCompressionBlocks), dim3(dim_grid), dim3(dim_block_init), 0, stream.value(), strm_desc, enc_streams, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size, comp_block_align); if (compression == SNAPPY) { try { if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) { gpu_snap(comp_in, comp_out, comp_res, stream); } else { nvcomp::batched_compress( nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream); } } catch (...) 
{ // There was an error in compressing so set an error status for each block thrust::for_each( rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), [] __device__(compression_result & stat) { stat.status = compression_status::FAILURE; }); // Since SNAPPY is the default compression (may not be explicitly requested), fall back to // writing without compression CUDF_LOG_WARN("ORC writer: compression failed, writing uncompressed data"); } } else if (compression == ZLIB) { if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::DEFLATE); reason) { CUDF_FAIL("Compression error: " + reason.value()); } nvcomp::batched_compress( nvcomp::compression_type::DEFLATE, comp_in, comp_out, comp_res, stream); } else if (compression == ZSTD) { if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD); reason) { CUDF_FAIL("Compression error: " + reason.value()); } nvcomp::batched_compress(nvcomp::compression_type::ZSTD, comp_in, comp_out, comp_res, stream); } else if (compression != NONE) { CUDF_FAIL("Unsupported compression type"); } dim3 dim_block_compact(1024, 1); hipLaunchKernelGGL(( gpuCompactCompressedBlocks), dim3(dim_grid), dim3(dim_block_compact), 0, stream.value(), strm_desc, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size); if (collect_statistics) { return cudf::io::collect_compression_statistics(comp_in, comp_res, stream); } else { return std::nullopt; } } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
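The encoder above leans on two building blocks that are easy to check in isolation: the zigzag mapping of signed integers (so small magnitudes stay small after the sign is folded in) and the base-128 varint written by StoreVarint. Below is a minimal host-side sketch of both, compilable with nvcc or hipcc; zigzag64 and store_varint are illustrative names for this sketch, not the kernel's own helpers.

#include <cstdint>
#include <cstdio>

// Same mapping as the zigzag() overloads in the encoder: 0,-1,1,-2,... -> 0,1,2,3,...
static uint64_t zigzag64(int64_t v) {
  return (static_cast<uint64_t>(v) << 1) ^ static_cast<uint64_t>(v >> 63);
}

// Base-128 varint: 7 payload bits per byte, continuation bit set on all but the
// last byte, mirroring the StoreVarint() helper used for DECIMAL and delta runs.
static uint32_t store_varint(uint8_t* dst, uint64_t v) {
  uint32_t n = 0;
  do {
    uint8_t c = v & 0x7f;
    v >>= 7;
    dst[n++] = v ? (c | 0x80) : c;
  } while (v);
  return n;
}

int main() {
  uint8_t buf[10];
  // -1 zigzags to 1, so it encodes as the single byte 0x01.
  uint32_t n = store_varint(buf, zigzag64(-1));
  printf("bytes=%u first=0x%02x\n", n, static_cast<unsigned>(buf[0]));  // bytes=1 first=0x01
  return 0;
}

Because small signed deltas zigzag to small unsigned values, the RLEv2 delta headers emitted above stay short for slowly varying columns.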
9d29460a58003647539f2b8cc437a1eea6149acf.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/comp/nvcomp_adapter.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/utilities/bit.hpp> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/tuple.h> namespace cudf { namespace io { namespace orc { namespace gpu { using cudf::detail::device_2dspan; constexpr int scratch_buffer_size = 512 * 4; constexpr int compact_streams_block_size = 1024; // Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2 // Workaround replaces zero-length patch lists by a dummy zero patch constexpr bool zero_pll_war = true; struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; volatile uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { uint32_t literal_run; uint32_t delta_run; uint32_t literal_mode; uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; volatile uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { uint32_t char_count; uint32_t lengths_red[(512 / 32)]; const char* str_data[512]; }; struct orcenc_state_s { uint32_t cur_row; // Current row in group uint32_t present_rows; // # of rows in present buffer uint32_t present_out; // # of rows in present buffer that have been flushed uint32_t nrows; // # of rows in current batch uint32_t numvals; // # of non-zero values in current batch (<=nrows) uint32_t numlengths; // # of non-zero values in DATA2 batch uint32_t nnz; // Running count of non-null values encoder_chunk_streams stream; EncChunk chunk; uint32_t strm_pos[CI_NUM_STREAMS]; uint8_t valid_buf[512]; // valid map bits union { byterle_enc_state_s byterle; intrle_enc_state_s intrle; strdata_enc_state_s strenc; StripeDictionary dict_stripe; } u; union { uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer uint32_t u32[scratch_buffer_size / 4]; } buf; union { uint8_t u8[2048]; uint32_t u32[1024]; int32_t i32[1024]; uint64_t u64[1024]; int64_t i64[1024]; } vals; union { uint8_t u8[2048]; uint32_t u32[1024]; uint64_t u64[1024]; } lengths; }; static inline __device__ uint32_t zigzag(uint32_t v) { return v; } static inline __device__ uint32_t zigzag(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; } static inline __device__ uint64_t zigzag(uint64_t v) { return v; } static inline __device__ uint64_t zigzag(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ __uint128_t zigzag(__int128_t v) { int64_t s = (v < 0) ? 
1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; } static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; } /** * @brief Raw data output * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] count number of bytes to encode * @param[in] t thread id */ template <StreamIndexType cid, uint32_t inmask> static __device__ void StoreBytes( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t count, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; while (count > 0) { uint32_t n = min(count, 512); if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; } dst += n; inpos += n; count -= n; } __syncthreads(); if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } } /** * @brief ByteRLE encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * * @return number of input values encoded */ template <StreamIndexType cid, uint32_t inmask> static __device__ uint32_t ByteRLE( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; while (numvals > 0) { uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512); if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map; __syncthreads(); if (t == 0) { // Find the start of an identical 3-byte sequence // TBD: The two loops below could be eliminated using more ballot+ffs using warp0 literal_run = 0; repeat_run = 0; while (literal_run < maxvals) { uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1]; uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1); if (mask) { uint32_t literal_run_ofs = __ffs(mask) - 1; literal_run += literal_run_ofs; repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1)); if (repeat_run + literal_run_ofs == 32) { while (next == ~0) { uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1; next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0; repeat_run += 32; } repeat_run += __ffs(~next) - 1; } repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals)); if (repeat_run < 3) { literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0; repeat_run = 0; } break; } rpt_map = next; literal_run += 32; } if (repeat_run >= 130) { // Limit large runs to multiples of 130 repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 
2 * 130 : 130; } else if (literal_run && literal_run + repeat_run == maxvals) { repeat_run = 0; // Try again at next iteration } s->u.byterle.repeat_run = repeat_run; s->u.byterle.literal_run = min(literal_run, maxvals); } __syncthreads(); literal_run = s->u.byterle.literal_run; if (!flush && literal_run == numvals) { literal_run &= ~0x7f; if (!literal_run) break; } if (literal_run > 0) { uint32_t num_runs = (literal_run + 0x7f) >> 7; if (t < literal_run) { uint32_t run_id = t >> 7; uint32_t run = min(literal_run - run_id * 128, 128); if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run; dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += num_runs + literal_run; out_cnt += literal_run; numvals -= literal_run; inpos += literal_run; } repeat_run = s->u.byterle.repeat_run; if (repeat_run > 0) { while (repeat_run >= 130) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = 0x7f; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += 130; numvals -= 130; inpos += 130; repeat_run -= 130; } if (!flush && repeat_run == numvals) { // Wait for more data in case we can continue the run later break; } if (repeat_run >= 3) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = repeat_run - 3; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += repeat_run; numvals -= repeat_run; inpos += repeat_run; } } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } return out_cnt; } /** * @brief Maps the symbol size in bytes to RLEv2 5-bit length code */ static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = { 0, 7, 15, 23, 27, 28, 29, 30, 31}; /** * @brief Encode a varint value, return the number of bytes written */ static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v) { uint32_t bytecnt = 0; for (;;) { auto c = static_cast<uint32_t>(v & 0x7f); v >>= 7u; if (v == 0) { dst[bytecnt++] = c; break; } else { dst[bytecnt++] = c + 0x80; } } return bytecnt; } template <class T> static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w) { for (uint32_t i = 0, b = w * 8; i < w; ++i) { b -= 8; dst[i] = static_cast<uint8_t>(v >> b); } } // Combine and store bits for symbol widths less than 8 static inline __device__ void StoreBitsBigEndian( uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t) { if (t <= (num_vals | 0x1f)) { uint32_t mask; if (w <= 1) { v = (v << 1) | (shuffle_xor(v, 1) & 0x1); v = (v << 2) | (shuffle_xor(v, 2) & 0x3); v = (v << 4) | (shuffle_xor(v, 4) & 0xf); mask = 0x7; } else if (w <= 2) { v = (v << 2) | (shuffle_xor(v, 1) & 0x3); v = (v << 4) | (shuffle_xor(v, 2) & 0xf); mask = 0x3; } else // if (w <= 4) { v = (v << 4) | (shuffle_xor(v, 1) & 0xf); mask = 0x1; } if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); } } } /** * @brief Integer RLEv2 encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * @param[in] temp_storage shared memory storage to perform block reduce * * @return number of input values encoded */ template <StreamIndexType cid, class T, bool is_signed, uint32_t inmask, int block_size, typename Storage> 
static __device__ uint32_t IntegerRLE( orcenc_state_s* s, const T* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage) { using block_reduce = cub::BlockReduce<T, block_size>; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; __shared__ volatile uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0; uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run; if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map; __syncthreads(); if (!t) { // Find the start of the next delta run (2 consecutive values with the same delta) literal_run = delta_run = 0; while (literal_run < maxvals) { if (delta_map != 0) { uint32_t literal_run_ofs = __ffs(delta_map) - 1; literal_run += literal_run_ofs; delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1)); if (literal_run_ofs + delta_run == 32) { for (;;) { uint32_t delta_idx = (literal_run + delta_run) >> 5; delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0; if (delta_map != ~0) break; delta_run += 32; } delta_run += __ffs(~delta_map) - 1; } delta_run += 2; break; } literal_run += 32; delta_map = s->u.intrle.delta_map[(literal_run >> 5)]; } literal_run = min(literal_run, maxvals); s->u.intrle.literal_run = literal_run; s->u.intrle.delta_run = min(delta_run, maxvals - literal_run); } __syncthreads(); literal_run = s->u.intrle.literal_run; // Find minimum and maximum values if (literal_run > 0) { // Find min & max T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max(); T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min(); uint32_t literal_mode, literal_w; vmin = block_reduce(temp_storage).Reduce(vmin, cub::Min()); __syncthreads(); vmax = block_reduce(temp_storage).Reduce(vmax, cub::Max()); if (t == 0) { uint32_t mode1_w, mode2_w; typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2; block_vmin = static_cast<uint64_t>(vmin); if constexpr (sizeof(T) > 4) { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7); mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7); } else { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3); mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3); } // Decide between mode1 & mode2 (also mode3 for length=2 repeat) if (vrange_mode2 == 0 && mode1_w > 1) { // Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= // 3) uint32_t bytecnt = 2; dst[0] = 0xC0 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; bytecnt += StoreVarint(dst + 2, vrange_mode1); dst[bytecnt++] = 0; // Zero delta s->u.intrle.literal_mode = 3; s->u.intrle.literal_w = bytecnt; } else { uint32_t range, w; // Mode 2 base value cannot be bigger than max int64_t, i.e. the first bit has to be 0 if (vmin <= std::numeric_limits<int64_t>::max() and mode1_w > mode2_w and (literal_run - 1) * (mode1_w - mode2_w) > 4) { s->u.intrle.literal_mode = 2; w = mode2_w; range = (uint32_t)vrange_mode2; } else { s->u.intrle.literal_mode = 1; w = mode1_w; range = (uint32_t)vrange_mode1; } if (w == 1) w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 
2 : 1; else w <<= 3; // bytes -> bits s->u.intrle.literal_w = w; } } __syncthreads(); vmin = static_cast<T>(block_vmin); literal_mode = s->u.intrle.literal_mode; literal_w = s->u.intrle.literal_w; if (literal_mode == 1) { // Direct mode if (!t) { dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; } dst += 2; typename std::make_unsigned<T>::type zzv0 = v0; if (t < literal_run) { zzv0 = zigzag(v0); } if (literal_w < 8) { StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t); } else if (t < literal_run) { StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3)); } } else if (literal_mode == 2) { // Patched base mode if (!t) { uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1; vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin; bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3)); if (zero_pll_war) { // Insert a dummy zero patch pll = 1; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0; } else { pll = 0; } dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw]; dst[3] = ((pgw - 1) << 5) | pll; if (is_signed) { vmax >>= 1; vmax |= vmin & ((T)1 << (bw * 8 - 1)); } StoreBytesBigEndian(dst + 4, vmax, bw); s->u.intrle.hdr_bytes = 4 + bw; s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3; } __syncthreads(); dst += s->u.intrle.hdr_bytes; v0 -= (t < literal_run) ? vmin : 0; if (literal_w < 8) StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t); else if (t < literal_run) StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3)); dst += s->u.intrle.pl_bytes; } else { // Delta mode dst += literal_w; literal_w = 0; } dst += (literal_run * literal_w + 7) >> 3; numvals -= literal_run; inpos += literal_run; out_cnt += literal_run; __syncthreads(); } delta_run = s->u.intrle.delta_run; if (delta_run > 0) { if (t == literal_run) { int64_t delta = (int64_t)v1 - (int64_t)v0; uint64_t delta_base = zigzag(v0); if (delta == 0 && delta_run >= 3 && delta_run <= 10) { // Short repeat uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7); dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3); for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) { b -= 8; dst[1 + i] = static_cast<uint8_t>(delta_base >> b); } s->u.intrle.hdr_bytes = 1 + delta_bw; } else { // Delta uint64_t delta_u = zigzag(delta); uint32_t bytecnt = 2; dst[0] = 0xC0 + ((delta_run - 1) >> 8); dst[1] = (delta_run - 1) & 0xff; bytecnt += StoreVarint(dst + bytecnt, delta_base); bytecnt += StoreVarint(dst + bytecnt, delta_u); s->u.intrle.hdr_bytes = bytecnt; } } __syncthreads(); dst += s->u.intrle.hdr_bytes; numvals -= delta_run; inpos += delta_run; out_cnt += delta_run; } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } __syncthreads(); return out_cnt; } /** * @brief Store a group of strings as a single concatenated string * * @param[in] dst destination buffer * @param[in] strenc string encoder state * @param[in] len(t) string length (per thread) * @param[in] t thread id */ static __device__ void StoreStringData(uint8_t* dst, strdata_enc_state_s* strenc, uint32_t len, int t) { // Start with summing up all the lengths uint32_t pos = len; 
uint32_t wt = t & 0x1f; for (uint32_t n = 1; n < 32; n <<= 1) { uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1)); pos += (wt & n) ? tmp : 0; } if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; } dst += pos - len; __syncthreads(); if (t < 32) { uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0; uint32_t wpos = wlen; for (uint32_t n = 1; n < 16; n <<= 1) { uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1)); wpos += (wt & n) ? tmp : 0; } if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; } if (wt == 0xf) { strenc->char_count = wpos; // Update stream position } } __syncthreads(); // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive character at a time // rather than have each thread to a memcpy if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); } } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id */ template <class T> inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } template <int block_size, typename Storage> static __device__ void encode_null_mask(orcenc_state_s* s, bitmask_type const* pushdown_mask, Storage& scan_storage, int t) { if (s->stream.ids[CI_PRESENT] < 0) return; auto const column = *s->chunk.column; while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) { // Number of rows read so far auto present_rows = s->present_rows; // valid_buf capacity is byte per thread in block auto const buf_available_bits = encode_block_size * 8 - s->numvals; // Number of rows for the block to process in this iteration auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits); // Number of rows for this thread to process in this iteration auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8); auto const row = s->chunk.null_mask_start_row + present_rows + t * 8; auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t { if (t_nrows == 0) return 0; if (mask == nullptr) return 0xff; auto const begin_offset = row + offset; auto const end_offset = min(begin_offset + 8, offset + column.size()); auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset); return mask_word & 0xff; }; uint8_t pd_byte = (1 << t_nrows) - 1; uint32_t pd_set_cnt = t_nrows; uint32_t offset = t_nrows != 0 ? 
t * 8 : nrows; if (pushdown_mask != nullptr) { pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1); pd_set_cnt = __popc(pd_byte); // Scan the number of valid bits to get dst offset for each thread cub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset); } auto const mask_byte = get_mask_byte(column.null_mask(), column.offset()); auto dst_offset = offset + s->nnz; auto vbuf_bit_idx = [](int row) { // valid_buf is a circular buffer with validity of 8 rows in each element return row % (encode_block_size * 8); }; if (dst_offset % 8 == 0 and pd_set_cnt == 8) { s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte; } else { for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) { // skip bits where pushdown mask is not set if (not(pd_byte & (1 << bit_idx))) continue; if (mask_byte & (1 << bit_idx)) { set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } else { clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } } } __syncthreads(); if (t == block_size - 1) { // Number of loaded rows, available for encode s->numvals += offset + pd_set_cnt; // Number of loaded rows (different from present_rows because of pushdown masks) s->nnz += offset + pd_set_cnt; } present_rows += nrows; if (!t) { s->present_rows = present_rows; } __syncthreads(); // RLE encode the present stream if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) { auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7; auto const nbytes_out = (s->numvals + flush) / 8; auto const nrows_encoded = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8; if (!t) { // Number of rows encoded so far s->present_out += nrows_encoded; s->numvals -= min(s->numvals, nrows_encoded); } __syncthreads(); } } // reset shared state if (t == 0) { s->nnz = 0; } } /** * @brief Encode column data * * @param[in] chunks encoder chunks device array [column][rowgroup] * @param[in, out] streams chunk streams device array [column][rowgroup] */ // blockDim {`encode_block_size`,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ union { typename cub::BlockScan<uint32_t, block_size>::TempStorage scan_u32; typename cub::BlockReduce<int32_t, block_size>::TempStorage i32; typename cub::BlockReduce<int64_t, block_size>::TempStorage i64; typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32; typename cub::BlockReduce<uint64_t, block_size>::TempStorage u64; } temp_storage; orcenc_state_s* const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[col_id][group_id]; s->stream = streams[col_id][group_id]; s->cur_row = 0; s->present_rows = 0; s->present_out = 0; s->numvals = 0; s->numlengths = 0; s->nnz = 0; s->strm_pos[CI_DATA] = 0; s->strm_pos[CI_PRESENT] = 0; s->strm_pos[CI_INDEX] = 0; // Dictionary data is encoded in a separate kernel s->strm_pos[CI_DATA2] = s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0; s->strm_pos[CI_DICTIONARY] = s->chunk.encoding_kind == DICTIONARY_V2 ? 
s->stream.lengths[CI_DICTIONARY] : 0; } __syncthreads(); auto const pushdown_mask = [&]() -> cudf::bitmask_type const* { auto const parent_index = s->chunk.column->parent_index; if (!parent_index.has_value()) return nullptr; return chunks[parent_index.value()][0].column->pushdown_mask; }(); encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t); __syncthreads(); auto const column = *s->chunk.column; while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) { // Fetch non-null values auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP; if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) { // Pass-through __syncthreads(); if (!t) { s->cur_row = s->chunk.num_rows; s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len; } } else if (s->cur_row < s->chunk.num_rows) { uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024; uint32_t nrows = min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), encode_block_size); auto const row = s->chunk.start_row + s->cur_row + t; auto const is_value_valid = [&]() { if (t >= nrows) return false; return bit_value_or(pushdown_mask, column.offset() + row, true) and bit_value_or(column.null_mask(), column.offset() + row, true); }(); s->buf.u32[t] = is_value_valid ? 1u : 0u; // TODO: Could use a faster reduction relying on _popc() for the initial phase lengths_to_positions(s->buf.u32, encode_block_size, t); __syncthreads(); if (is_value_valid) { int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1); switch (s->chunk.type_kind) { case INT: case DATE: case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break; case DOUBLE: case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break; case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break; case BOOLEAN: case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break; case TIMESTAMP: { int64_t ts = column.element<int64_t>(row); int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)]; int64_t seconds = ts / ts_scale; int64_t nanos = (ts - seconds * ts_scale); s->vals.i64[nz_idx] = seconds - orc_utc_epoch; if (nanos != 0) { // Trailing zeroes are encoded in the lower 3-bits uint32_t zeroes = 0; nanos *= powers_of_ten[min(s->chunk.scale, 9)]; if (!(nanos % 100)) { nanos /= 100; zeroes = 1; while (zeroes < 7 && !(nanos % 10)) { nanos /= 10; zeroes++; } } nanos = (nanos << 3) + zeroes; } s->lengths.u64[nz_idx] = nanos; break; } case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { uint32_t dict_idx = s->chunk.dict_index[row]; if (dict_idx > 0x7fff'ffffu) { dict_idx = s->chunk.dict_index[dict_idx & 0x7fff'ffffu]; } s->vals.u32[nz_idx] = dict_idx; } else { string_view value = column.element<string_view>(row); s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data(); s->lengths.u32[nz_idx] = value.size_bytes(); } break; // Reusing the lengths array for the scale stream // Note: can be written in a faster manner, given that all values are equal case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break; case LIST: case MAP: { auto const& offsets = column.child(lists_column_view::offsets_column_index); // Compute list length from the offsets s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) - offsets.element<size_type>(row + column.offset()); } break; default: break; } } __syncthreads(); if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) { // Store string data uint32_t nz 
= s->buf.u32[511]; uint32_t nz_idx = (s->nnz + t) & 0x3ff; uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0; StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t); if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; } __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { // bool8 -> 8x bool1 uint32_t nz = s->buf.u32[511]; uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3; if (t < n) { uint32_t idx8 = (s->nnz & ~7) + (t << 3); s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) | ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) | ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) | ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) | ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) | ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) | ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) | ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0); } __syncthreads(); } if (!t) { uint32_t nz = s->buf.u32[511]; s->nnz += nz; s->numvals += nz; s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == LIST || s->chunk.type_kind == MAP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0; s->cur_row += nrows; } __syncthreads(); // Encode values if (s->numvals > 0) { uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n; switch (s->chunk.type_kind) { case SHORT: case INT: case DATE: n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>( s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32); break; case LONG: case TIMESTAMP: n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>( s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64); break; case BYTE: n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t); break; case BOOLEAN: n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8; break; case FLOAT: StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t); n = s->numvals; break; case DOUBLE: StoreBytes<CI_DATA, 0x1fff>( s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t); n = s->numvals; break; case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>( s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32); } else { n = s->numvals; } break; case DECIMAL: { if (is_value_valid) { auto const id = column.type().id(); __uint128_t const zz_val = id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row)) : id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row)) : zigzag(column.element<__int128_t>(row)); auto const offset = (row == s->chunk.start_row) ? 
0 : s->chunk.decimal_offsets[row - 1]; StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val); } n = s->numvals; } break; default: n = s->numvals; break; } __syncthreads(); if (!t) { s->numvals -= min(n, s->numvals); } } // Encode secondary stream values if (s->numlengths > 0) { uint32_t n; switch (s->chunk.type_kind) { case TIMESTAMP: n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>( s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64); break; case DECIMAL: case LIST: case MAP: case STRING: n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32); break; default: n = s->numlengths; break; } __syncthreads(); if (!t) { s->numlengths -= min(n, s->numlengths); } } } __syncthreads(); } __syncthreads(); if (t <= CI_PRESENT && s->stream.ids[t] >= 0) { // Update actual compressed length // (not needed for decimal data, whose exact size is known before encode) if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL)) streams[col_id][group_id].lengths[t] = s->strm_pos[t]; if (!s->stream.data_ptrs[t]) { streams[col_id][group_id].data_ptrs[t] = static_cast<uint8_t*>(const_cast<void*>(column.head())) + (column.offset() + s->chunk.start_row) * s->chunk.dtype_len; } } } /** * @brief Encode column dictionaries * * @param[in] stripes Stripe dictionaries device array [stripe][string_column] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeStringDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage; orcenc_state_s* const s = &state_g; uint32_t stripe_id = blockIdx.x; uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2; int t = threadIdx.x; if (t == 0) s->u.dict_stripe = stripes[stripe_id]; __syncthreads(); auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; if (t == 0) { s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; s->stream = *strm_ptr; s->strm_pos[cid] = 0; s->numlengths = 0; s->nrows = s->u.dict_stripe.num_strings; s->cur_row = 0; } auto const string_column = s->u.dict_stripe.leaf_column; auto const dict_data = s->u.dict_stripe.dict_data; __syncthreads(); if (s->chunk.encoding_kind != DICTIONARY_V2) { return; // This column isn't using dictionary encoding -> bail out } while (s->cur_row < s->nrows || s->numlengths != 0) { uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512)); uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0; if (cid == CI_DICTIONARY) { // Encoding string contents const char* ptr = nullptr; uint32_t count = 0; if (t < numvals) { auto string_val = string_column->element<string_view>(string_idx); ptr = string_val.data(); count = string_val.size_bytes(); } s->u.strenc.str_data[t] = ptr; StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t); if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; } } else { // Encoding string lengths uint32_t count = (t < numvals) ? 
static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes()) : 0; uint32_t nz_idx = (s->cur_row + t) & 0x3ff; if (t < numvals) s->lengths.u32[nz_idx] = count; __syncthreads(); if (s->numlengths + numvals > 0) { uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage); __syncthreads(); if (!t) { s->numlengths += numvals; s->numlengths -= min(n, s->numlengths); } } } if (t == 0) { s->cur_row += numvals; } __syncthreads(); } if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; } } /** * @brief Merge chunked column data into a single contiguous stream * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in,out] streams List of encoder chunk streams [column][rowgroup] */ // blockDim {compact_streams_block_size,1,1} __global__ void __launch_bounds__(compact_streams_block_size) gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) StripeStream ss; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; auto const t = threadIdx.x; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; } __syncthreads(); if (ss.data_ptr == nullptr) { return; } auto const cid = ss.stream_type; auto dst_ptr = ss.data_ptr; for (auto group = ss.first_chunk_id; group < ss.first_chunk_id + ss.num_chunks; ++group) { auto const len = streams[ss.column_id][group].lengths[cid]; if (len > 0) { auto const src_ptr = streams[ss.column_id][group].data_ptrs[cid]; for (uint32_t i = t; i < len; i += blockDim.x) { dst_ptr[i] = src_ptr[i]; } __syncthreads(); } if (t == 0) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; } dst_ptr += len; } } /** * @brief Initializes compression input/output structures * * @param[in] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] results Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression * @param[in] comp_block_align Required alignment for compressed blocks */ // blockDim {256,1,1} __global__ void __launch_bounds__(256) gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc, device_2dspan<encoder_chunk_streams> streams, // const? device_span<device_span<uint8_t const>> inputs, device_span<device_span<uint8_t>> outputs, device_span<compression_result> results, device_span<uint8_t> compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size, uint32_t comp_block_align) { __shared__ __align__(16) StripeStream ss; __shared__ uint8_t* volatile uncomp_base_g; auto const padded_block_header_size = util::round_up_unsafe(block_header_size, comp_block_align); auto const padded_comp_block_size = util::round_up_unsafe(max_comp_blk_size, comp_block_align); auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks; uint8_t *src, *dst; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type]; } __syncthreads(); src = uncomp_base_g; dst = compressed_bfr.data() + ss.bfr_offset; num_blocks = (ss.stream_size > 0) ? 
(ss.stream_size - 1) / comp_blk_size + 1 : 1; for (uint32_t b = t; b < num_blocks; b += 256) { uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); inputs[ss.first_block + b] = {src + b * comp_blk_size, blk_size}; auto const dst_offset = padded_block_header_size + b * (padded_block_header_size + padded_comp_block_size); outputs[ss.first_block + b] = {dst + dst_offset, max_comp_blk_size}; results[ss.first_block + b] = {0, compression_status::FAILURE}; } } /** * @brief Compacts compressed blocks in a single contiguous stream, and update 3-byte block length *fields * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] results Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc, device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<compression_result> results, device_span<uint8_t> compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ const uint8_t* volatile comp_src_g; __shared__ uint32_t volatile comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks, b, blk_size; const uint8_t* src; uint8_t* dst; if (t == 0) ss = strm_desc[stripe_id][stream_id]; __syncthreads(); num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0; dst = compressed_bfr.data() + ss.bfr_offset; b = 0; do { if (t == 0) { auto const src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); auto dst_len = (results[ss.first_block + b].status == compression_status::SUCCESS) ? results[ss.first_block + b].bytes_written : src_len; uint32_t blk_size24{}; // Only use the compressed block if it's smaller than the uncompressed // If compression failed, dst_len == src_len, so the uncompressed block will be used if (src_len < dst_len) { // Copy from uncompressed source src = inputs[ss.first_block + b].data(); results[ss.first_block + b].bytes_written = src_len; dst_len = src_len; blk_size24 = dst_len * 2 + 1; } else { // Compressed block src = outputs[ss.first_block + b].data(); blk_size24 = dst_len * 2 + 0; } dst[0] = static_cast<uint8_t>(blk_size24 >> 0); dst[1] = static_cast<uint8_t>(blk_size24 >> 8); dst[2] = static_cast<uint8_t>(blk_size24 >> 16); comp_src_g = src; comp_len_g = dst_len; } __syncthreads(); src = comp_src_g; blk_size = comp_len_g; dst += 3; // skip over length written by thread0 if (src != dst) { for (uint32_t i = 0; i < blk_size; i += 1024) { uint8_t v = (i + t < blk_size) ? 
src[i + t] : 0; __syncthreads(); if (i + t < blk_size) { dst[i + t] = v; } } } dst += blk_size; __syncthreads(); } while (++b < num_blocks); // Update stripe stream with the compressed size if (t == 0) { strm_desc[stripe_id][stream_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr.data() + ss.bfr_offset)); } } void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams, rmm::cuda_stream_view stream) { dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk dim3 dim_grid(chunks.size().first, chunks.size().second); gpuEncodeOrcColumnData<encode_block_size> <<<dim_grid, dim_block, 0, stream.value()>>>(chunks, streams); } void EncodeStripeDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, uint32_t num_string_columns, uint32_t num_stripes, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per dictionary dim3 dim_grid(num_string_columns * num_stripes, 2); gpuEncodeStringDictionaries<512> <<<dim_grid, dim_block, 0, stream.value()>>>(stripes, chunks, enc_streams); } void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(compact_streams_block_size, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuCompactOrcDataStreams<<<dim_grid, dim_block, 0, stream.value()>>>(strm_desc, enc_streams); } std::optional<writer_compression_statistics> CompressOrcDataStreams( device_span<uint8_t> compressed_data, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, uint32_t max_comp_blk_size, uint32_t comp_block_align, bool collect_statistics, device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, device_span<compression_result> comp_res, rmm::cuda_stream_view stream) { rmm::device_uvector<device_span<uint8_t const>> comp_in(num_compressed_blocks, stream); rmm::device_uvector<device_span<uint8_t>> comp_out(num_compressed_blocks, stream); dim3 dim_block_init(256, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuInitCompressionBlocks<<<dim_grid, dim_block_init, 0, stream.value()>>>(strm_desc, enc_streams, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size, comp_block_align); if (compression == SNAPPY) { try { if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) { gpu_snap(comp_in, comp_out, comp_res, stream); } else { nvcomp::batched_compress( nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream); } } catch (...) 
{ // There was an error in compressing so set an error status for each block thrust::for_each( rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), [] __device__(compression_result & stat) { stat.status = compression_status::FAILURE; }); // Since SNAPPY is the default compression (may not be explicitly requested), fall back to // writing without compression CUDF_LOG_WARN("ORC writer: compression failed, writing uncompressed data"); } } else if (compression == ZLIB) { if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::DEFLATE); reason) { CUDF_FAIL("Compression error: " + reason.value()); } nvcomp::batched_compress( nvcomp::compression_type::DEFLATE, comp_in, comp_out, comp_res, stream); } else if (compression == ZSTD) { if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD); reason) { CUDF_FAIL("Compression error: " + reason.value()); } nvcomp::batched_compress(nvcomp::compression_type::ZSTD, comp_in, comp_out, comp_res, stream); } else if (compression != NONE) { CUDF_FAIL("Unsupported compression type"); } dim3 dim_block_compact(1024, 1); gpuCompactCompressedBlocks<<<dim_grid, dim_block_compact, 0, stream.value()>>>( strm_desc, comp_in, comp_out, comp_res, compressed_data, comp_blk_size, max_comp_blk_size); if (collect_statistics) { return cudf::io::collect_compression_statistics(comp_in, comp_res, stream); } else { return std::nullopt; } } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
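// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file above): the compaction
// kernel writes blk_size24 = dst_len * 2 + 1 when it keeps the uncompressed
// ("original") block and dst_len * 2 + 0 when it keeps the compressed one, then
// stores the value in three little-endian bytes. A minimal host-side sketch of
// that header layout, assuming the ORC convention that the low bit flags an
// original block and the upper 23 bits carry the block length; the helper names
// encode_orc_block_header / decode_orc_block_header are hypothetical.
#include <cstdint>

inline void encode_orc_block_header(uint32_t len, bool is_original, uint8_t hdr[3])
{
  uint32_t v = (len << 1) | (is_original ? 1u : 0u);  // same as len * 2 + flag
  hdr[0] = static_cast<uint8_t>(v >> 0);
  hdr[1] = static_cast<uint8_t>(v >> 8);
  hdr[2] = static_cast<uint8_t>(v >> 16);
}

inline void decode_orc_block_header(uint8_t const hdr[3], uint32_t* len, bool* is_original)
{
  uint32_t v = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);
  *is_original = (v & 1u) != 0;
  *len         = v >> 1;
}
// ---------------------------------------------------------------------------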
0fba1030465feecfe9e955953785479a62ae8665.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include "paddle/fluid/operators/accuracy_op.h" #include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <int BlockSize> __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, const int64_t* labeldata, int* correct_data, float* accuracy, int* total_data) { int count = 0; __shared__ int total[BlockSize]; // support only 1 block for (int i = threadIdx.x; i < (N); i += BlockSize) { for (int j = 0; j < D; ++j) { if (Xdata[i * D + j] == labeldata[i]) { ++count; break; } } } total[threadIdx.x] = count; __syncthreads(); // reduce the count with init value 0, and output accuracy. int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); if (threadIdx.x == 0) { *correct_data = result; *accuracy = static_cast<float>(result) / static_cast<float>(N); *total_data = N; } } template <typename T> class AccuracyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto* inference = ctx.Input<Tensor>("Out"); auto* indices = ctx.Input<Tensor>("Indices"); auto* label = ctx.Input<Tensor>("Label"); auto* accuracy = ctx.Output<Tensor>("Accuracy"); auto* correct = ctx.Output<Tensor>("Correct"); auto* total = ctx.Output<Tensor>("Total"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? const int64_t* indices_data = indices->data<int64_t>(); const int64_t* label_data = label->data<int64_t>(); int* correct_data = correct->mutable_data<int>(ctx.GetPlace()); int* total_data = total->mutable_data<int>(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace()); int num_samples = static_cast<int>(inference->dims()[0]); size_t infer_width = inference->dims()[1]; auto stream = ctx.cuda_device_context().stream(); platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream); if (num_samples == 0) { return; } hipLaunchKernelGGL(( AccuracyCudaKernel< PADDLE_CUDA_NUM_THREADS>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, num_samples, infer_width, indices_data, label_data, correct_data, accuracy_data, total_data); } }; } // namespace operators } // namespace paddle // FIXME(typhoonzero): types of T is for inference data. // label data is always int64 REGISTER_OP_CUDA_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>, paddle::operators::AccuracyOpCUDAKernel<double>);
0fba1030465feecfe9e955953785479a62ae8665.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include "paddle/fluid/operators/accuracy_op.h" #include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <int BlockSize> __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, const int64_t* labeldata, int* correct_data, float* accuracy, int* total_data) { int count = 0; __shared__ int total[BlockSize]; // support only 1 block for (int i = threadIdx.x; i < (N); i += BlockSize) { for (int j = 0; j < D; ++j) { if (Xdata[i * D + j] == labeldata[i]) { ++count; break; } } } total[threadIdx.x] = count; __syncthreads(); // reduce the count with init value 0, and output accuracy. int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); if (threadIdx.x == 0) { *correct_data = result; *accuracy = static_cast<float>(result) / static_cast<float>(N); *total_data = N; } } template <typename T> class AccuracyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "It must use CUDAPlace."); auto* inference = ctx.Input<Tensor>("Out"); auto* indices = ctx.Input<Tensor>("Indices"); auto* label = ctx.Input<Tensor>("Label"); auto* accuracy = ctx.Output<Tensor>("Accuracy"); auto* correct = ctx.Output<Tensor>("Correct"); auto* total = ctx.Output<Tensor>("Total"); // FIXME(typhoonzero): only support indices currently // if add support for output values, how to detect the data type? const int64_t* indices_data = indices->data<int64_t>(); const int64_t* label_data = label->data<int64_t>(); int* correct_data = correct->mutable_data<int>(ctx.GetPlace()); int* total_data = total->mutable_data<int>(ctx.GetPlace()); float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace()); int num_samples = static_cast<int>(inference->dims()[0]); size_t infer_width = inference->dims()[1]; auto stream = ctx.cuda_device_context().stream(); platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream); if (num_samples == 0) { return; } AccuracyCudaKernel< PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( num_samples, infer_width, indices_data, label_data, correct_data, accuracy_data, total_data); } }; } // namespace operators } // namespace paddle // FIXME(typhoonzero): types of T is for inference data. // label data is always int64 REGISTER_OP_CUDA_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel<float>, paddle::operators::AccuracyOpCUDAKernel<double>);
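// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file above): a host-side
// reference of what AccuracyCudaKernel computes, handy for unit checks. For
// each of the N samples it scans the D candidate indices and counts the sample
// as correct if any candidate equals the label; accuracy = correct / N. The
// function name accuracy_reference is hypothetical and used only in this sketch.
#include <cstdint>

inline float accuracy_reference(const int64_t* indices, const int64_t* labels,
                                int N, int D, int* correct_out)
{
  int correct = 0;
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < D; ++j) {
      if (indices[i * D + j] == labels[i]) { ++correct; break; }
    }
  }
  if (correct_out) *correct_out = correct;
  return (N > 0) ? static_cast<float>(correct) / static_cast<float>(N) : 0.0f;
}
// ---------------------------------------------------------------------------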
13c69089fa925d399c7a27a99d50b45e93c313a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Filters // // Includes: system #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <stdint.h> #include <errno.h> #include <assert.h> #include <string.h> #include <sys/io.h> #include <cutil_inline.h> // Includes: local #include "bmp.h" enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER}; #define CLAMP_8bit(x) max(0, min(255, (x))) char *BMPInFile = "lena.bmp"; char *BMPOutFile = "output.bmp"; char *Filter = "sobel"; int FilterMode = SOBEL_FILTER; // Functions void Cleanup(void); void ParseArguments(int, char**); void FilterWrapper(unsigned char* pImageIn, int Width, int Height); // Kernels __global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, const int HIGH_BOOST_FACTOR ); /* Device Memory */ unsigned char *d_In; unsigned char *d_Out; const float *d_sobel; // Setup for kernel size const int TILE_WIDTH = 6; const int TILE_HEIGHT = 6; const int FILTER_RADIUS = 1; //const int FILTER_RADIUS = 3; const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1; const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER; const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS; const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS; const int EDGE_VALUE_THRESHOLD = 70; const int HIGH_BOOST_FACTOR = 10; #include "filter_kernel.hip" void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY )) < 0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp == 8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if (go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR |S_IRGRP)) < 0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write BMP Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height) { int i, j, rows, cols, startCol, endCol, startRow, endRow; const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1}; rows = height; cols = width; // Initialize all output pixels to zero for(i=0; i<rows; i++) { for(j=0; j<cols; j++) { imageOut[i*width + j] = 0; } } startCol = 1; endCol = cols - 1; startRow = 1; endRow = rows - 1; // Go through all inner pizel positions for(i=startRow; i<endRow; i++) { for(j=startCol; j<endCol; j++) { 
// sum up the 9 values to calculate both the direction x and direction y float sumX = 0, sumY=0; for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) { for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) { float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]); sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]; sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]; } } imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0; } } } // Host code int main(int argc, char** argv) { ParseArguments(argc, argv); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL, *out = NULL; printf("Running %s filter\n", Filter); BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); printf("Computing the CPU output\n"); printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size); unsigned int cpu_timer = 0; cutilCheckError(cutCreateTimer(&cpu_timer)); cutilCheckError(cutStartTimer(cpu_timer)); CPU_Sobel(data, out, dib.width, dib.height); cutilCheckError(cutStopTimer(cpu_timer)); BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete); printf("Done with CPU output\n"); printf("CPU time: %f (ms) \n", cutGetTimerValue(cpu_timer)); cutilCheckError(cutDeleteTimer(cpu_timer)); unsigned int sobel_mtimer = 0; // Initialize the timer to zero cycles. cutilCheckError(cutCreateTimer(&sobel_mtimer)); printf("Allocating %d bytes for image \n", dib.image_size); cutilSafeCall( hipMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( hipMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) ); cutilCheckError(cutStartTimer(sobel_mtimer)); //Send image to host DRAM. hipMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), hipMemcpyHostToDevice); cutilCheckError(cutStopTimer(sobel_mtimer)); // Call Kernels FilterWrapper(data, dib.width, dib.height); cutilCheckError(cutStartTimer(sobel_mtimer)); // Copy image back to host hipMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), hipMemcpyDeviceToHost); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("memory Transfer Time: %f (ms)\n", cutGetTimerValue(sobel_mtimer)); cutilCheckError(cutDeleteTimer(sobel_mtimer)); // Write output image BitMapWrite(BMPOutFile, &bmp, &dib, out, palete); Cleanup(); } void Cleanup(void) { cutilSafeCall( hipDeviceReset() ); exit(0); } void FilterWrapper(unsigned char* pImageIn, int Width, int Height) { // Design grid disection around tile size int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH; int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT; dim3 dimGrid(gridWidth, gridHeight); // But actually invoke larger blocks to take care of surrounding shared memory dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); unsigned int sobel_timer = 0; // Initialize the timer to zero cycles. 
cutilCheckError(cutCreateTimer(&sobel_timer)); switch(FilterMode) { case SOBEL_FILTER: printf("Sobel Filter \n"); cutilCheckError(cutStartTimer(sobel_timer)); hipLaunchKernelGGL(( SobelFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); cutilCheckError(cutStopTimer(sobel_timer)); printf("GPU time: %f (ms) \n", cutGetTimerValue(sobel_timer)); break; case AVERAGE_FILTER: printf("Average Filter \n"); hipLaunchKernelGGL(( AverageFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); break; case HIGH_BOOST_FILTER: printf("Boost Filter \n"); hipLaunchKernelGGL(( HighBoostFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height, HIGH_BOOST_FACTOR ); cutilCheckMsg("kernel launch failure"); break; } cutilSafeCall( hipDeviceSynchronize() ); } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) { BMPInFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) { BMPOutFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) { Filter = argv[i+1]; i = i + 1; if (strcmp(Filter, "sobel") == 0) FilterMode = SOBEL_FILTER; else if (strcmp(Filter, "average") == 0) FilterMode = AVERAGE_FILTER; else if (strcmp(Filter, "boost") == 0) FilterMode = HIGH_BOOST_FILTER; } } }
13c69089fa925d399c7a27a99d50b45e93c313a5.cu
// // Filters // // Includes: system #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <stdint.h> #include <errno.h> #include <assert.h> #include <string.h> #include <sys/io.h> #include <cutil_inline.h> // Includes: local #include "bmp.h" enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER}; #define CLAMP_8bit(x) max(0, min(255, (x))) char *BMPInFile = "lena.bmp"; char *BMPOutFile = "output.bmp"; char *Filter = "sobel"; int FilterMode = SOBEL_FILTER; // Functions void Cleanup(void); void ParseArguments(int, char**); void FilterWrapper(unsigned char* pImageIn, int Width, int Height); // Kernels __global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height); __global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height, const int HIGH_BOOST_FACTOR ); /* Device Memory */ unsigned char *d_In; unsigned char *d_Out; const float *d_sobel; // Setup for kernel size const int TILE_WIDTH = 6; const int TILE_HEIGHT = 6; const int FILTER_RADIUS = 1; //const int FILTER_RADIUS = 3; const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1; const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER; const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS; const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS; const int EDGE_VALUE_THRESHOLD = 70; const int HIGH_BOOST_FACTOR = 10; #include "filter_kernel.cu" void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY )) < 0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp == 8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if (go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR |S_IRGRP)) < 0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write BMP Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height) { int i, j, rows, cols, startCol, endCol, startRow, endRow; const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1}; rows = height; cols = width; // Initialize all output pixels to zero for(i=0; i<rows; i++) { for(j=0; j<cols; j++) { imageOut[i*width + j] = 0; } } startCol = 1; endCol = cols - 1; startRow = 1; endRow = rows - 1; // Go through all inner pizel positions for(i=startRow; i<endRow; i++) { for(j=startCol; j<endCol; j++) { // sum up the 9 values to calculate both the direction x and direction y float sumX = 0, 
sumY=0; for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) { for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) { float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]); sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)]; sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)]; } } imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0; } } } // Host code int main(int argc, char** argv) { ParseArguments(argc, argv); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL, *out = NULL; printf("Running %s filter\n", Filter); BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); printf("Computing the CPU output\n"); printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size); unsigned int cpu_timer = 0; cutilCheckError(cutCreateTimer(&cpu_timer)); cutilCheckError(cutStartTimer(cpu_timer)); CPU_Sobel(data, out, dib.width, dib.height); cutilCheckError(cutStopTimer(cpu_timer)); BitMapWrite("CPU_sobel.bmp", &bmp, &dib, out, palete); printf("Done with CPU output\n"); printf("CPU time: %f (ms) \n", cutGetTimerValue(cpu_timer)); cutilCheckError(cutDeleteTimer(cpu_timer)); unsigned int sobel_mtimer = 0; // Initialize the timer to zero cycles. cutilCheckError(cutCreateTimer(&sobel_mtimer)); printf("Allocating %d bytes for image \n", dib.image_size); cutilSafeCall( cudaMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) ); cutilSafeCall( cudaMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) ); cutilCheckError(cutStartTimer(sobel_mtimer)); //Send image to host DRAM. cudaMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), cudaMemcpyHostToDevice); cutilCheckError(cutStopTimer(sobel_mtimer)); // Call Kernels FilterWrapper(data, dib.width, dib.height); cutilCheckError(cutStartTimer(sobel_mtimer)); // Copy image back to host cudaMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), cudaMemcpyDeviceToHost); cutilCheckError(cutStopTimer(sobel_mtimer)); printf("memory Transfer Time: %f (ms)\n", cutGetTimerValue(sobel_mtimer)); cutilCheckError(cutDeleteTimer(sobel_mtimer)); // Write output image BitMapWrite(BMPOutFile, &bmp, &dib, out, palete); Cleanup(); } void Cleanup(void) { cutilSafeCall( cudaThreadExit() ); exit(0); } void FilterWrapper(unsigned char* pImageIn, int Width, int Height) { // Design grid disection around tile size int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH; int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT; dim3 dimGrid(gridWidth, gridHeight); // But actually invoke larger blocks to take care of surrounding shared memory dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); unsigned int sobel_timer = 0; // Initialize the timer to zero cycles. 
cutilCheckError(cutCreateTimer(&sobel_timer)); switch(FilterMode) { case SOBEL_FILTER: printf("Sobel Filter \n"); cutilCheckError(cutStartTimer(sobel_timer)); SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); cutilCheckError(cutStopTimer(sobel_timer)); printf("GPU time: %f (ms) \n", cutGetTimerValue(sobel_timer)); break; case AVERAGE_FILTER: printf("Average Filter \n"); AverageFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height); cutilCheckMsg("kernel launch failure"); break; case HIGH_BOOST_FILTER: printf("Boost Filter \n"); HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height, HIGH_BOOST_FACTOR ); cutilCheckMsg("kernel launch failure"); break; } cutilSafeCall( cudaThreadSynchronize() ); } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) { BMPInFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) { BMPOutFile = argv[i+1]; i = i + 1; } if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) { Filter = argv[i+1]; i = i + 1; if (strcmp(Filter, "sobel") == 0) FilterMode = SOBEL_FILTER; else if (strcmp(Filter, "average") == 0) FilterMode = AVERAGE_FILTER; else if (strcmp(Filter, "boost") == 0) FilterMode = HIGH_BOOST_FILTER; } } }
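// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file above): filter_kernel.cu
// is included but not shown here, so the following is only a generic sketch of
// the tile-plus-halo indexing implied by BLOCK_WIDTH = TILE_WIDTH +
// 2*FILTER_RADIUS: each thread block loads one pixel per thread into shared
// memory (the tile plus a FILTER_RADIUS halo) and only the inner TILE-sized
// group of threads writes an output pixel. The kernel name HaloCopySketch and
// the local constants are assumptions mirroring the values defined above; this
// is not the project's actual filter kernel.
namespace halo_sketch {
constexpr int kTileW  = 6;
constexpr int kTileH  = 6;
constexpr int kRadius = 1;
constexpr int kBlockW = kTileW + 2 * kRadius;
constexpr int kBlockH = kTileH + 2 * kRadius;

__global__ void HaloCopySketch(const unsigned char* in, unsigned char* out,
                               int width, int height)
{
  __shared__ unsigned char tile[kBlockH][kBlockW];

  // The block covers a tile-sized output region, shifted back by the filter
  // radius so that the border threads load the halo pixels.
  int gx = static_cast<int>(blockIdx.x * kTileW + threadIdx.x) - kRadius;
  int gy = static_cast<int>(blockIdx.y * kTileH + threadIdx.y) - kRadius;

  // Clamp reads at the image border.
  int cx = min(max(gx, 0), width - 1);
  int cy = min(max(gy, 0), height - 1);
  tile[threadIdx.y][threadIdx.x] = in[cy * width + cx];
  __syncthreads();

  // Only interior threads produce output; a plain copy stands in for the
  // actual stencil computation.
  bool interior = threadIdx.x >= kRadius && threadIdx.x < kBlockW - kRadius &&
                  threadIdx.y >= kRadius && threadIdx.y < kBlockH - kRadius;
  if (interior && gx < width && gy < height) out[gy * width + gx] = tile[threadIdx.y][threadIdx.x];
}
}  // namespace halo_sketch
// ---------------------------------------------------------------------------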
a57f0d3607e56f6dd6d6a081e51efeb50e67c565.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "CUDAkernel_multiply.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *sourceA = NULL; hipMalloc(&sourceA, XSIZE*YSIZE); float *sourceB = NULL; hipMalloc(&sourceB, XSIZE*YSIZE); float *destination = NULL; hipMalloc(&destination, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( CUDAkernel_multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, sourceA,sourceB,destination,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( CUDAkernel_multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, sourceA,sourceB,destination,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( CUDAkernel_multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, sourceA,sourceB,destination,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a57f0d3607e56f6dd6d6a081e51efeb50e67c565.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "CUDAkernel_multiply.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *sourceA = NULL; cudaMalloc(&sourceA, XSIZE*YSIZE); float *sourceB = NULL; cudaMalloc(&sourceB, XSIZE*YSIZE); float *destination = NULL; cudaMalloc(&destination, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); CUDAkernel_multiply<<<gridBlock,threadBlock>>>(sourceA,sourceB,destination,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { CUDAkernel_multiply<<<gridBlock,threadBlock>>>(sourceA,sourceB,destination,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { CUDAkernel_multiply<<<gridBlock,threadBlock>>>(sourceA,sourceB,destination,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
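// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file above): kernel launches
// are asynchronous, so wrapping a launch loop in steady_clock timestamps
// without synchronizing before the end timestamp mostly measures enqueue/launch
// overhead rather than kernel execution time. A hedged sketch of event-based
// timing for the same kind of loop; time_kernel_ms is a hypothetical helper and
// kernel_launch stands for whatever launch statement is being measured, e.g.
//   time_kernel_ms([&]{ CUDAkernel_multiply<<<gridBlock,threadBlock>>>(sourceA,sourceB,destination,size); }, 1000);
#include <cuda_runtime.h>

template <typename LaunchFn>
float time_kernel_ms(LaunchFn kernel_launch, int iterations)
{
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) kernel_launch();  // enqueue the kernels
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // wait until all enqueued work has finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed GPU time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}
// ---------------------------------------------------------------------------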
afe4f05aaad7e7878a7154e1100031959f53464b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_utils.cuh" #include "hilbert_sort.hpp" #include "kernels/k_hilbert.cuh" #include "vendored/hilbert.h" #include <hipcub/hipcub.hpp> #include <stdexcept> #include <string> #include <vector> namespace timemachine { HilbertSort::HilbertSort(const int N) : N_(N), d_bin_to_idx_(HILBERT_GRID_DIM * HILBERT_GRID_DIM * HILBERT_GRID_DIM), d_sort_keys_in_(N), d_sort_keys_out_(N), d_sort_vals_in_(N), d_sort_storage_(nullptr), d_sort_storage_bytes_(0) { // initialize hilbert curve which maps each of the HILBERT_GRID_DIM x HILBERT_GRID_DIM x HILBERT_GRID_DIM cells into an index. std::vector<unsigned int> bin_to_idx(HILBERT_GRID_DIM * HILBERT_GRID_DIM * HILBERT_GRID_DIM); for (int i = 0; i < HILBERT_GRID_DIM; i++) { for (int j = 0; j < HILBERT_GRID_DIM; j++) { for (int k = 0; k < HILBERT_GRID_DIM; k++) { bitmask_t hilbert_coords[3]; hilbert_coords[0] = i; hilbert_coords[1] = j; hilbert_coords[2] = k; unsigned int bin = static_cast<unsigned int>(hilbert_c2i(3, HILBERT_N_BITS, hilbert_coords)); bin_to_idx[i * HILBERT_GRID_DIM * HILBERT_GRID_DIM + j * HILBERT_GRID_DIM + k] = bin; } } } d_bin_to_idx_.copy_from(&bin_to_idx[0]); // estimate size needed to do radix sorting // reuse d_sort_keys_in_ rather than constructing a dummy output idxs buffer hipcub::DeviceRadixSort::SortPairs( nullptr, d_sort_storage_bytes_, d_sort_keys_in_.data, d_sort_keys_out_.data, d_sort_vals_in_.data, d_sort_keys_in_.data, N_); gpuErrchk(hipPeekAtLastError()); d_sort_storage_.reset(new DeviceBuffer<char>(d_sort_storage_bytes_)); } HilbertSort::~HilbertSort(){}; void HilbertSort::sort_device( const int N, const unsigned int *d_atom_idxs, const double *d_coords, const double *d_box, unsigned int *d_output_perm, hipStream_t stream) { if (N > N_) { throw std::runtime_error("number of idxs to sort must be less than or equal to N"); } const int tpb = DEFAULT_THREADS_PER_BLOCK; const int B = ceil_divide(N, tpb); hipLaunchKernelGGL(( k_coords_to_kv_gather), dim3(B), dim3(tpb), 0, stream, N, d_atom_idxs, d_coords, d_box, d_bin_to_idx_.data, d_sort_keys_in_.data, d_sort_vals_in_.data); gpuErrchk(hipPeekAtLastError()); hipcub::DeviceRadixSort::SortPairs( d_sort_storage_->data, d_sort_storage_bytes_, d_sort_keys_in_.data, d_sort_keys_out_.data, d_sort_vals_in_.data, d_output_perm, N, 0, // begin bit sizeof(*d_sort_keys_in_.data) * 8, // end bit stream // cudaStream ); gpuErrchk(hipPeekAtLastError()); } std::vector<unsigned int> HilbertSort::sort_host(const int N, const double *h_coords, const double *h_box) { std::vector<unsigned int> h_atom_idxs(N); std::iota(h_atom_idxs.begin(), h_atom_idxs.end(), 0); DeviceBuffer<double> d_coords(N * 3); DeviceBuffer<double> d_box(3 * 3); DeviceBuffer<unsigned int> d_atom_idxs(N); DeviceBuffer<unsigned int> d_perm(N); d_coords.copy_from(h_coords); d_box.copy_from(h_box); d_atom_idxs.copy_from(&h_atom_idxs[0]); hipStream_t stream = static_cast<hipStream_t>(0); this->sort_device(N, d_atom_idxs.data, d_coords.data, d_box.data, d_perm.data, stream); gpuErrchk(hipStreamSynchronize(stream)); d_perm.copy_to(&h_atom_idxs[0]); return h_atom_idxs; } } // namespace timemachine
afe4f05aaad7e7878a7154e1100031959f53464b.cu
#include "gpu_utils.cuh" #include "hilbert_sort.hpp" #include "kernels/k_hilbert.cuh" #include "vendored/hilbert.h" #include <cub/cub.cuh> #include <stdexcept> #include <string> #include <vector> namespace timemachine { HilbertSort::HilbertSort(const int N) : N_(N), d_bin_to_idx_(HILBERT_GRID_DIM * HILBERT_GRID_DIM * HILBERT_GRID_DIM), d_sort_keys_in_(N), d_sort_keys_out_(N), d_sort_vals_in_(N), d_sort_storage_(nullptr), d_sort_storage_bytes_(0) { // initialize hilbert curve which maps each of the HILBERT_GRID_DIM x HILBERT_GRID_DIM x HILBERT_GRID_DIM cells into an index. std::vector<unsigned int> bin_to_idx(HILBERT_GRID_DIM * HILBERT_GRID_DIM * HILBERT_GRID_DIM); for (int i = 0; i < HILBERT_GRID_DIM; i++) { for (int j = 0; j < HILBERT_GRID_DIM; j++) { for (int k = 0; k < HILBERT_GRID_DIM; k++) { bitmask_t hilbert_coords[3]; hilbert_coords[0] = i; hilbert_coords[1] = j; hilbert_coords[2] = k; unsigned int bin = static_cast<unsigned int>(hilbert_c2i(3, HILBERT_N_BITS, hilbert_coords)); bin_to_idx[i * HILBERT_GRID_DIM * HILBERT_GRID_DIM + j * HILBERT_GRID_DIM + k] = bin; } } } d_bin_to_idx_.copy_from(&bin_to_idx[0]); // estimate size needed to do radix sorting // reuse d_sort_keys_in_ rather than constructing a dummy output idxs buffer cub::DeviceRadixSort::SortPairs( nullptr, d_sort_storage_bytes_, d_sort_keys_in_.data, d_sort_keys_out_.data, d_sort_vals_in_.data, d_sort_keys_in_.data, N_); gpuErrchk(cudaPeekAtLastError()); d_sort_storage_.reset(new DeviceBuffer<char>(d_sort_storage_bytes_)); } HilbertSort::~HilbertSort(){}; void HilbertSort::sort_device( const int N, const unsigned int *d_atom_idxs, const double *d_coords, const double *d_box, unsigned int *d_output_perm, cudaStream_t stream) { if (N > N_) { throw std::runtime_error("number of idxs to sort must be less than or equal to N"); } const int tpb = DEFAULT_THREADS_PER_BLOCK; const int B = ceil_divide(N, tpb); k_coords_to_kv_gather<<<B, tpb, 0, stream>>>( N, d_atom_idxs, d_coords, d_box, d_bin_to_idx_.data, d_sort_keys_in_.data, d_sort_vals_in_.data); gpuErrchk(cudaPeekAtLastError()); cub::DeviceRadixSort::SortPairs( d_sort_storage_->data, d_sort_storage_bytes_, d_sort_keys_in_.data, d_sort_keys_out_.data, d_sort_vals_in_.data, d_output_perm, N, 0, // begin bit sizeof(*d_sort_keys_in_.data) * 8, // end bit stream // cudaStream ); gpuErrchk(cudaPeekAtLastError()); } std::vector<unsigned int> HilbertSort::sort_host(const int N, const double *h_coords, const double *h_box) { std::vector<unsigned int> h_atom_idxs(N); std::iota(h_atom_idxs.begin(), h_atom_idxs.end(), 0); DeviceBuffer<double> d_coords(N * 3); DeviceBuffer<double> d_box(3 * 3); DeviceBuffer<unsigned int> d_atom_idxs(N); DeviceBuffer<unsigned int> d_perm(N); d_coords.copy_from(h_coords); d_box.copy_from(h_box); d_atom_idxs.copy_from(&h_atom_idxs[0]); cudaStream_t stream = static_cast<cudaStream_t>(0); this->sort_device(N, d_atom_idxs.data, d_coords.data, d_box.data, d_perm.data, stream); gpuErrchk(cudaStreamSynchronize(stream)); d_perm.copy_to(&h_atom_idxs[0]); return h_atom_idxs; } } // namespace timemachine
2c1c87b72543554894af205b99392192176b0047.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*------------------------------------------------------------------------- * * CUDA functions for Steepest descend in POCS-type algorithms. * * This file will iteratively minimize by stepest descend the total variation * of the input image, with the parameters given, using GPUs. * * CODE by Ander Biguri * * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define MAXTHREADS 1024 #define MAX_BUFFER 60 #include "POCS_TV.hpp" #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ hipDeviceReset();\ mexErrMsgIdAndTxt("CBCT:CUDA:POCS_TV",hipGetErrorString(__err));\ } \ } while (0) // CUDA kernels //https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927 __global__ void divideArrayScalar(float* vec,float scalar,const size_t n){ unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]/=scalar; } } __global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]*=scalar; } } __global__ void substractArrays(float* vec,float* vec2,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]-=vec2[i]; } } __device__ __inline__ void gradient(const float* u, float* grad, long z, long y, long x, long depth, long rows, long cols){ unsigned long size2d = rows*cols; unsigned long long idx = z * size2d + y * cols + x; float uidx = u[idx]; if ( z - 1 >= 0 && z<depth) { grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ; } if ( y - 1 >= 0 && y<rows){ grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ; } if ( x - 1 >= 0 && x<cols) { grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]); } } __global__ void gradientTV(const float* f, float* dftv, long depth, long rows, long cols,const float delta){ unsigned long x = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = threadIdx.y + blockIdx.y * blockDim.y; unsigned long z = threadIdx.z + blockIdx.z * blockDim.z; unsigned long long idx = z * rows * cols + y * cols + x; if ( x >= cols || y >= rows || z >= depth ) return; float df[3] ={0.f,0.f,0.f}; float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k} float dfj[3]={0.f,0.f,0.f}; float dfk[3]={0.f,0.f,0.f}; gradient(f,df ,z ,y ,x , depth,rows,cols); gradient(f,dfi ,z ,y ,x+1, depth,rows,cols); gradient(f,dfj ,z ,y+1,x , depth,rows,cols); gradient(f,dfk ,z+1,y ,x , depth,rows,cols); float eps=0.00000001; //% avoid division by zero float wx=__expf(-(df[0]/delta)*(df[0]/delta)); float wy=__expf(-(df[1]/delta)*(df[1]/delta)); float wz=__expf(-(df[2]/delta)*(df[2]/delta)); float wxi=__expf(-(dfi[0]/delta)*(dfi[0]/delta)); float wyi=__expf(-(dfi[1]/delta)*(dfi[1]/delta)); float wzi=__expf(-(dfi[2]/delta)*(dfi[2]/delta)); float wxj=__expf(-(dfj[0]/delta)*(dfj[0]/delta)); float wyj=__expf(-(dfj[1]/delta)*(dfj[1]/delta)); float wzj=__expf(-(dfj[2]/delta)*(dfj[2]/delta)); float wxk=__expf(-(dfk[0]/delta)*(dfk[0]/delta)); float wyk=__expf(-(dfk[1]/delta)*(dfk[1]/delta)); float wzk=__expf(-(dfk[2]/delta)*(dfk[2]/delta)); // this hsould do the trick I think dftv[idx]=(wx*df[0]+wy*df[1]+wz*df[2])/(sqrt(wx*df[0] *df[0] +wy*df[1] *df[1] +wz*df[2] *df[2])+eps) -wzi*dfi[2]/(sqrt(wxi*dfi[0]*dfi[0]+wyi*dfi[1]*dfi[1]+wzi*dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient. 
-wyj*dfj[1]/(sqrt(wxj*dfj[0]*dfj[0]+wyj*dfj[1]*dfj[1]+wzj*dfj[2]*dfj[2]) +eps) -wxk*dfk[0]/(sqrt(wxk*dfk[0]*dfk[0]+wyk*dfk[1]*dfk[1]+wzk*dfk[2]*dfk[2]) +eps); return; } __device__ void warpReduce(volatile float *sdata, size_t tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; float value=0; while (i < n) { value=g_idata[i]; //avoid reading twice mySum += value*value; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDART_VERSION >= 9000) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduceSum(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; // float value=0; while (i < n) { mySum += g_idata[i]; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDART_VERSION >= 9000) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } // main function void aw_pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter,const float delta, const GpuIds& gpuids){ // Prepare for MultiGPU int deviceCount = gpuids.GetLength(); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning thrown) // Check the available devices, and if they are the same if (!gpuids.AreEqualDevices()) { mexWarnMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed."); } int dev; // We don't know if the devices are being used. lets check that. and only use the amount of memory we need. 
// check free memory size_t mem_GPU_global; checkFreeMemory(gpuids, &mem_GPU_global); // %5 of free memory should be enough, we have almost no variables in these kernels size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ; size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ; size_t mem_size_image = sizeof(float)* total_pixels; size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS; // Decide how are we handling the distribution of computation size_t mem_img_each_GPU; unsigned int buffer_length=2; //Does everything fit in the GPU? unsigned int slices_per_split; // if it is a thin problem (no need to split), just use one GPU if (image_size[2]<4){deviceCount=1;} unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this. if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary) { // We only need to split if we have extra GPUs slices_per_split=(image_size[2]+deviceCount-1)/deviceCount; mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2)); }else{ // As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all size_t mem_free=mem_GPU_global-mem_auxiliary; splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free)); // Now, there is an overhead here, as each splits should have 2 slices more, to account for overlap of images. // lets make sure these 2 slices fit, if they do not, add 1 to splits. slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2)); // if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){ // one more split should do the job, as its an edge case. splits++; //recompute for later slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2)); } // How many EXTRA buffer slices should be able to fit in here??!?! // Only do it if there are splits needed. if(splits>1){ mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary); unsigned int extra_buff=(mem_free/mem_slice_image); buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down. buffer_length=max(buffer_length,2);// minimum 2 buffer_length=min(MAX_BUFFER,buffer_length); mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2); }else{ buffer_length=2; } // Assert if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){ mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n"); } } // Assert if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){ mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPU","Assertion Failed. Memory needed calculation broken! 
Please tell: [email protected]\n"); } float** d_image= (float**)malloc(deviceCount*sizeof(float*)); float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*)); float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*)); float** d_norm2= (float**)malloc(deviceCount*sizeof(float*)); // allocate memory in each GPU for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMalloc((void**)&d_image[dev] , mem_img_each_GPU); hipMemset(d_image[dev],0 , mem_img_each_GPU); hipMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU); hipMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image); hipMalloc((void**)&d_norm2aux[dev] , mem_auxiliary); cudaCheckErrors("Malloc error"); } unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1]; float* buffer; if(splits>1){ mexWarnMsgIdAndTxt("minimizeAwTV:POCS_TV2:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed."); }else{ hipHostMalloc((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float)); } // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes. int isHostRegisterSupported = 0; #if CUDART_VERSION >= 9020 hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]); #endif // splits>2 is completely empirical observation if (isHostRegisterSupported & splits>2){ hipHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable); hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable); } cudaCheckErrors("Error pinning memory"); // Create streams int nStream_device=2; int nStreams=deviceCount*nStream_device; hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); for (int i = 0; i < nStream_device; ++i){ hipStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); // For the reduction double totalsum_prev; double totalsum; float sum_curr_spl; float * sumnorm2; hipHostMalloc((void**)&sumnorm2,deviceCount*sizeof(float)); unsigned int curr_slices; unsigned long long curr_pixels; size_t linear_idx_start; unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); bool is_first_chunk; bool is_last_chunk; for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){ if(splits>1){ totalsum_prev=0; } for(unsigned int sp=0;sp<splits;sp++){ // For each iteration we need to compute all the image. The ordering of these loops // need to be like this due to the bounding layers between splits. If more than 1 split is needed // for each GPU then there is no other way that taking the entire memory out of GPU and putting it back. // If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory // Copy image to memory for (dev = 0; dev < deviceCount; dev++){ curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); curr_pixels=curr_slices*image_size[0]*image_size[1]; linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev); // Check if its the first or last chunck is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1); is_first_chunk=!(sp*deviceCount+dev); // lets compute where we start copyes and how much. This avoids 3 calls to Memcpy offset_device[dev]=buffer_pixels*is_first_chunk; offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk; bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk; } if(i==0){ for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } } // if we need to split and its not the first iteration, then we need to copy from Host memory the previosu result. if (splits>1 & i>0){ for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } } cudaCheckErrors("Memcpy failure on multi split"); for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){ // For the gradient dim3 blockGrad(10, 10, 10); dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); // Compute the gradient of the TV norm // I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should // be enough but they are not. hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad),0,stream[dev*nStream_device], d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0],delta); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); // no need to copy the 2 aux slices here hipStreamSynchronize(stream[dev*nStream_device]); hipMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]); } // Compute the L2 norm of the gradient. For that, reduction is used. //REDUCE for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; hipStreamSynchronize(stream[dev*nStream_device+1]); reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; if (dimgridRed > 1) { reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed); hipStreamSynchronize(stream[dev*nStream_device]); hipMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]); } else { hipStreamSynchronize(stream[dev*nStream_device]); hipMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]); } } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } cudaCheckErrors("Reduction error"); // Accumulate the norm accross devices sum_curr_spl=0; // this is CPU code for (dev = 0; dev < deviceCount; dev++){ sum_curr_spl+=sumnorm2[dev]; } sum_curr_spl+=0.0000001f; // avoid division by zero // If we have more than one splits, lets use the result from prior calls if(i>0 && splits>1){ // this is already stored: //totalsum=totalsum_prev; }else{ totalsum=sum_curr_spl; } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; //NORMALIZE //in a Tesla, maximum blocks =15 SM * 4 blocks/SM hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels); //MULTIPLY HYPERPARAMETER hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,alpha, total_pixels); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } cudaCheckErrors("Scalar operations error"); //SUBSTRACT GRADIENT ////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; hipLaunchKernelGGL(( substractArrays), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels); } } // Synchronize mathematics, make sure bounding pixels are correct for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } if(splits==1){ for(dev=0; dev<deviceCount;dev++){ curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; if (dev<deviceCount-1){ hipSetDevice(gpuids[dev+1]); hipMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost); hipSetDevice(gpuids[dev]); hipMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice); } hipDeviceSynchronize(); if (dev>0){ hipSetDevice(gpuids[dev-1]); hipMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost); hipSetDevice(gpuids[dev]); hipMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice); } } }else{ // We need to take it out :( for(dev=0; dev<deviceCount;dev++){ hipSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; hipMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]); } } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } cudaCheckErrors("Memory gather error"); totalsum_prev+=sum_curr_spl; } totalsum=totalsum_prev; } // If there has not been splits, we still have data in memory if(splits==1){ for(dev=0; dev<deviceCount;dev++){ hipSetDevice(gpuids[dev]); curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev; total_pixels=curr_slices*image_size[0]*image_size[1]; hipMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost); } } cudaCheckErrors("Copy result back"); for(dev=0; dev<deviceCount;dev++){ hipSetDevice(gpuids[dev]); hipFree(d_image[dev]); hipFree(d_norm2aux[dev]); hipFree(d_dimgTV[dev]); hipFree(d_norm2[dev]); } if (splits==1){ hipHostFree(buffer); } if (isHostRegisterSupported& splits>2){ hipHostUnregister(img); hipHostUnregister(dst); } for (int i = 0; i < nStreams; ++i) hipStreamDestroy(stream[i]) ; cudaCheckErrors("Memory free"); // hipDeviceReset(); } void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){ size_t memfree; size_t memtotal; const int deviceCount = gpuids.GetLength(); for (int dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
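// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file above): reduceNorm2 and
// reduceSum implement a grid-stride accumulation followed by a shared-memory
// tree reduction (with a warp-shuffle tail on CUDA 9+ toolkits), writing one
// partial sum per block; a second single-block pass then folds those partials.
// A simplified sketch of the same structure without the shuffle tail; the
// kernel name reduce_sum_sq_sketch is hypothetical, and it expects blockDim.x
// to be a power of two with blockDim.x * sizeof(float) bytes of dynamic shared
// memory at launch.
__global__ void reduce_sum_sq_sketch(const float* in, float* block_sums, size_t n)
{
  extern __shared__ float sbuf[];
  size_t tid  = threadIdx.x;
  size_t i    = blockIdx.x * blockDim.x + tid;
  size_t step = blockDim.x * gridDim.x;

  // Grid-stride accumulation: each thread folds many elements into one value.
  float acc = 0.0f;
  for (; i < n; i += step) { float v = in[i]; acc += v * v; }
  sbuf[tid] = acc;
  __syncthreads();

  // Shared-memory tree reduction down to sbuf[0].
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) sbuf[tid] += sbuf[tid + s];
    __syncthreads();
  }
  if (tid == 0) block_sums[blockIdx.x] = sbuf[0];
}
// ---------------------------------------------------------------------------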
2c1c87b72543554894af205b99392192176b0047.cu
/*------------------------------------------------------------------------- * * CUDA functions for Steepest descend in POCS-type algorithms. * * This file will iteratively minimize by stepest descend the total variation * of the input image, with the parameters given, using GPUs. * * CODE by Ander Biguri * * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define MAXTHREADS 1024 #define MAX_BUFFER 60 #include "POCS_TV.hpp" #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ cudaDeviceReset();\ mexErrMsgIdAndTxt("CBCT:CUDA:POCS_TV",cudaGetErrorString(__err));\ } \ } while (0) // CUDA kernels //https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927 __global__ void divideArrayScalar(float* vec,float scalar,const size_t n){ unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]/=scalar; } } __global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]*=scalar; } } __global__ void substractArrays(float* vec,float* vec2,const size_t n) { unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; for(; i<n; i+=gridDim.x*blockDim.x) { vec[i]-=vec2[i]; } } __device__ __inline__ void gradient(const float* u, float* grad, long z, long y, long x, long depth, long rows, long cols){ unsigned long size2d = rows*cols; unsigned long long idx = z * size2d + y * cols + x; float uidx = u[idx]; if ( z - 1 >= 0 && z<depth) { grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ; } if ( y - 1 >= 0 && y<rows){ grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ; } if ( x - 1 >= 0 && x<cols) { grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]); } } __global__ void gradientTV(const float* f, float* dftv, long depth, long rows, long cols,const float delta){ unsigned long x = threadIdx.x + blockIdx.x * blockDim.x; unsigned long y = threadIdx.y + blockIdx.y * blockDim.y; unsigned long z = threadIdx.z + blockIdx.z * blockDim.z; unsigned long long idx = z * rows * cols + y * cols + x; if ( x >= cols || y >= rows || z >= depth ) return; float df[3] ={0.f,0.f,0.f}; float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k} float dfj[3]={0.f,0.f,0.f}; float dfk[3]={0.f,0.f,0.f}; gradient(f,df ,z ,y ,x , depth,rows,cols); gradient(f,dfi ,z ,y ,x+1, depth,rows,cols); gradient(f,dfj ,z ,y+1,x , depth,rows,cols); gradient(f,dfk ,z+1,y ,x , depth,rows,cols); float eps=0.00000001; //% avoid division by zero float wx=__expf(-(df[0]/delta)*(df[0]/delta)); float wy=__expf(-(df[1]/delta)*(df[1]/delta)); float wz=__expf(-(df[2]/delta)*(df[2]/delta)); float wxi=__expf(-(dfi[0]/delta)*(dfi[0]/delta)); float wyi=__expf(-(dfi[1]/delta)*(dfi[1]/delta)); float wzi=__expf(-(dfi[2]/delta)*(dfi[2]/delta)); float wxj=__expf(-(dfj[0]/delta)*(dfj[0]/delta)); float wyj=__expf(-(dfj[1]/delta)*(dfj[1]/delta)); float wzj=__expf(-(dfj[2]/delta)*(dfj[2]/delta)); float wxk=__expf(-(dfk[0]/delta)*(dfk[0]/delta)); float wyk=__expf(-(dfk[1]/delta)*(dfk[1]/delta)); float wzk=__expf(-(dfk[2]/delta)*(dfk[2]/delta)); // this hsould do the trick I think dftv[idx]=(wx*df[0]+wy*df[1]+wz*df[2])/(sqrt(wx*df[0] *df[0] +wy*df[1] *df[1] +wz*df[2] *df[2])+eps) -wzi*dfi[2]/(sqrt(wxi*dfi[0]*dfi[0]+wyi*dfi[1]*dfi[1]+wzi*dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient. 
-wyj*dfj[1]/(sqrt(wxj*dfj[0]*dfj[0]+wyj*dfj[1]*dfj[1]+wzj*dfj[2]*dfj[2]) +eps) -wxk*dfk[0]/(sqrt(wxk*dfk[0]*dfk[0]+wyk*dfk[1]*dfk[1]+wzk*dfk[2]*dfk[2]) +eps); return; } __device__ void warpReduce(volatile float *sdata, size_t tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; float value=0; while (i < n) { value=g_idata[i]; //avoid reading twice mySum += value*value; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDART_VERSION >= 9000) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduceSum(float *g_idata, float *g_odata, size_t n){ extern __shared__ volatile float sdata[]; //http://stackoverflow.com/a/35133396/1485872 size_t tid = threadIdx.x; size_t i = blockIdx.x*blockDim.x + tid; size_t gridSize = blockDim.x*gridDim.x; float mySum = 0; // float value=0; while (i < n) { mySum += g_idata[i]; i += gridSize; } sdata[tid] = mySum; __syncthreads(); if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); #if (__CUDART_VERSION >= 9000) if ( tid < 32 ) { mySum = sdata[tid] + sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32); } } #else if (tid < 32) { warpReduce(sdata, tid); mySum = sdata[0]; } #endif if (tid == 0) g_odata[blockIdx.x] = mySum; } // main function void aw_pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter,const float delta, const GpuIds& gpuids){ // Prepare for MultiGPU int deviceCount = gpuids.GetLength(); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning thrown) // Check the available devices, and if they are the same if (!gpuids.AreEqualDevices()) { mexWarnMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed."); } int dev; // We don't know if the devices are being used. lets check that. and only use the amount of memory we need. 
// check free memory size_t mem_GPU_global; checkFreeMemory(gpuids, &mem_GPU_global); // %5 of free memory should be enough, we have almost no variables in these kernels size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ; size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ; size_t mem_size_image = sizeof(float)* total_pixels; size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS; // Decide how are we handling the distribution of computation size_t mem_img_each_GPU; unsigned int buffer_length=2; //Does everything fit in the GPU? unsigned int slices_per_split; // if it is a thin problem (no need to split), just use one GPU if (image_size[2]<4){deviceCount=1;} unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this. if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary) { // We only need to split if we have extra GPUs slices_per_split=(image_size[2]+deviceCount-1)/deviceCount; mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2)); }else{ // As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all size_t mem_free=mem_GPU_global-mem_auxiliary; splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free)); // Now, there is an overhead here, as each splits should have 2 slices more, to account for overlap of images. // lets make sure these 2 slices fit, if they do not, add 1 to splits. slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2)); // if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){ // one more split should do the job, as its an edge case. splits++; //recompute for later slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2)); } // How many EXTRA buffer slices should be able to fit in here??!?! // Only do it if there are splits needed. if(splits>1){ mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary); unsigned int extra_buff=(mem_free/mem_slice_image); buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down. buffer_length=max(buffer_length,2);// minimum 2 buffer_length=min(MAX_BUFFER,buffer_length); mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2); }else{ buffer_length=2; } // Assert if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){ mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n"); } } // Assert if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){ mexErrMsgIdAndTxt("minimizeAwTV:POCS_TV2:GPU","Assertion Failed. Memory needed calculation broken! 
Please tell: [email protected]\n"); } float** d_image= (float**)malloc(deviceCount*sizeof(float*)); float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*)); float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*)); float** d_norm2= (float**)malloc(deviceCount*sizeof(float*)); // allocate memory in each GPU for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaMalloc((void**)&d_image[dev] , mem_img_each_GPU); cudaMemset(d_image[dev],0 , mem_img_each_GPU); cudaMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU); cudaMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image); cudaMalloc((void**)&d_norm2aux[dev] , mem_auxiliary); cudaCheckErrors("Malloc error"); } unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1]; float* buffer; if(splits>1){ mexWarnMsgIdAndTxt("minimizeAwTV:POCS_TV2:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed."); }else{ cudaMallocHost((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float)); } // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes. int isHostRegisterSupported = 0; #if CUDART_VERSION >= 9020 cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]); #endif // splits>2 is completely empirical observation if (isHostRegisterSupported & splits>2){ cudaHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable); cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable); } cudaCheckErrors("Error pinning memory"); // Create streams int nStream_device=2; int nStreams=deviceCount*nStream_device; cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); for (int i = 0; i < nStream_device; ++i){ cudaStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); // For the reduction double totalsum_prev; double totalsum; float sum_curr_spl; float * sumnorm2; cudaMallocHost((void**)&sumnorm2,deviceCount*sizeof(float)); unsigned int curr_slices; unsigned long long curr_pixels; size_t linear_idx_start; unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long)); bool is_first_chunk; bool is_last_chunk; for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){ if(splits>1){ totalsum_prev=0; } for(unsigned int sp=0;sp<splits;sp++){ // For each iteration we need to compute all the image. The ordering of these loops // need to be like this due to the bounding layers between splits. If more than 1 split is needed // for each GPU then there is no other way that taking the entire memory out of GPU and putting it back. // If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory // Copy image to memory for (dev = 0; dev < deviceCount; dev++){ curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); curr_pixels=curr_slices*image_size[0]*image_size[1]; linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev); // Check if its the first or last chunck is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1); is_first_chunk=!(sp*deviceCount+dev); // lets compute where we start copyes and how much. This avoids 3 calls to Memcpy offset_device[dev]=buffer_pixels*is_first_chunk; offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk; bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk; } if(i==0){ for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]); } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } } // if we need to split and its not the first iteration, then we need to copy from Host memory the previosu result. if (splits>1 & i>0){ for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]); } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } } cudaCheckErrors("Memcpy failure on multi split"); for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){ // For the gradient dim3 blockGrad(10, 10, 10); dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); // Compute the gradient of the TV norm // I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should // be enough but they are not. gradientTV<<<gridGrad, blockGrad,0,stream[dev*nStream_device]>>>(d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0],delta); } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); // no need to copy the 2 aux slices here cudaStreamSynchronize(stream[dev*nStream_device]); cudaMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]); } // Compute the L2 norm of the gradient. For that, reduction is used. //REDUCE for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; cudaStreamSynchronize(stream[dev*nStream_device+1]); reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels); } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; size_t dimblockRed = MAXTHREADS; size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; if (dimgridRed > 1) { reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed); cudaStreamSynchronize(stream[dev*nStream_device]); cudaMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]); } else { cudaStreamSynchronize(stream[dev*nStream_device]); cudaMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]); } } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } cudaCheckErrors("Reduction error"); // Accumulate the norm accross devices sum_curr_spl=0; // this is CPU code for (dev = 0; dev < deviceCount; dev++){ sum_curr_spl+=sumnorm2[dev]; } sum_curr_spl+=0.0000001f; // avoid division by zero // If we have more than one splits, lets use the result from prior calls if(i>0 && splits>1){ // this is already stored: //totalsum=totalsum_prev; }else{ totalsum=sum_curr_spl; } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; //NORMALIZE //in a Tesla, maximum blocks =15 SM * 4 blocks/SM divideArrayScalar <<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels); //MULTIPLY HYPERPARAMETER multiplyArrayScalar<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,alpha, total_pixels); } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } cudaCheckErrors("Scalar operations error"); //SUBSTRACT GRADIENT ////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; substractArrays<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels); } } // Synchronize mathematics, make sure bounding pixels are correct for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } if(splits==1){ for(dev=0; dev<deviceCount;dev++){ curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? 
slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; if (dev<deviceCount-1){ cudaSetDevice(gpuids[dev+1]); cudaMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost); cudaSetDevice(gpuids[dev]); cudaMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice); } cudaDeviceSynchronize(); if (dev>0){ cudaSetDevice(gpuids[dev-1]); cudaMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost); cudaSetDevice(gpuids[dev]); cudaMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice); } } }else{ // We need to take it out :( for(dev=0; dev<deviceCount;dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev); linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev); total_pixels=curr_slices*image_size[0]*image_size[1]; cudaMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]); } } for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaDeviceSynchronize(); } cudaCheckErrors("Memory gather error"); totalsum_prev+=sum_curr_spl; } totalsum=totalsum_prev; } // If there has not been splits, we still have data in memory if(splits==1){ for(dev=0; dev<deviceCount;dev++){ cudaSetDevice(gpuids[dev]); curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev; total_pixels=curr_slices*image_size[0]*image_size[1]; cudaMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost); } } cudaCheckErrors("Copy result back"); for(dev=0; dev<deviceCount;dev++){ cudaSetDevice(gpuids[dev]); cudaFree(d_image[dev]); cudaFree(d_norm2aux[dev]); cudaFree(d_dimgTV[dev]); cudaFree(d_norm2[dev]); } if (splits==1){ cudaFreeHost(buffer); } if (isHostRegisterSupported& splits>2){ cudaHostUnregister(img); cudaHostUnregister(dst); } for (int i = 0; i < nStreams; ++i) cudaStreamDestroy(stream[i]) ; cudaCheckErrors("Memory free"); // cudaDeviceReset(); } void checkFreeMemory(const GpuIds& gpuids, size_t *mem_GPU_global){ size_t memfree; size_t memtotal; const int deviceCount = gpuids.GetLength(); for (int dev = 0; dev < deviceCount; dev++){ cudaSetDevice(gpuids[dev]); cudaMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
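/*
 * Illustrative sketch (not part of the file above): what one inner iteration of
 * aw_pocs_tv boils down to on a single GPU when the whole volume fits in memory and
 * no split/buffer bookkeeping is needed. It reuses the kernels defined above
 * (gradientTV, divideArrayScalar, multiplyArrayScalar, substractArrays) with the
 * signatures as written there; minTV_step and the l2norm() helper are hypothetical
 * names (l2norm could be the two-pass reduction sketched after the .hip file).
 */
float l2norm(const float* d_v, size_t n);   // hypothetical norm helper, not in the original file

void minTV_step(float* d_img, float* d_grad,
                long cols, long rows, long depth,
                float alpha, float delta) {
    size_t n = (size_t)cols * rows * depth;

    // Same thread-block shape as the original launch (x ~ cols, y ~ rows, z ~ slices).
    dim3 block(10, 10, 10);
    dim3 grid((unsigned)((cols  + block.x - 1) / block.x),
              (unsigned)((rows  + block.y - 1) / block.y),
              (unsigned)((depth + block.z - 1) / block.z));

    // d(AwTV)/d(img), one value per voxel.
    gradientTV<<<grid, block>>>(d_img, d_grad, depth, rows, cols, delta);

    // Steepest descent with a normalised gradient: img <- img - alpha * grad / ||grad||.
    float nrm = l2norm(d_grad, n) + 1e-7f;   // eps mirrors the divide-by-zero guard above
    divideArrayScalar  <<<60, MAXTHREADS>>>(d_grad, nrm, n);
    multiplyArrayScalar<<<60, MAXTHREADS>>>(d_grad, alpha, n);
    substractArrays    <<<60, MAXTHREADS>>>(d_img, d_grad, n);
    cudaDeviceSynchronize();
}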
0ac5ff4278cb11faa31f6fe3a4ba22565729e868.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" template <class T> __device__ void memcpy_p(T *dst, const T *src, int size) { for (int idx = threadIdx.x; idx < size; idx += blockDim.x) dst[idx] = src[idx]; } __global__ void reverse_kernel(SORT_T *buffer, int size, int sub_size, int block_per_outer) { GET_GID int blockId = blockIdx.x/block_per_outer; int totalWarps = (step_size >> 5); int totalBlocks = gridDim.x/block_per_outer; int totalSubs = size/sub_size; int blockIndex = blockIdx.x % block_per_outer; int warpIdx = blockIndex * blockDim.x + threadIdx.x; int inner_step = blockDim.x * block_per_outer; for (int sub_id = blockId; sub_id < totalSubs; sub_id += totalBlocks) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>2); index += inner_step) { int new_index = sub_id * sub_size + (sub_size>>1) + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; // int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void reverse_warp(SORT_T *buffer, int size, int sub_size) { GET_GID int warpId = (gid >> 5); int totalWarps = (step_size >> 5); int totalSubs = size/sub_size; int warpIdx = threadIdx.x & 0x1F; for (int sub_id = warpId; sub_id < totalSubs; sub_id += totalWarps) { // sub_id is the current for (int index = warpIdx; index < sub_size/4; index += 32) { int new_index = sub_id * sub_size + sub_size/2 + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; //int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void reverse_thread(SORT_T *buffer, int size, int sub_size) { GET_GID int totalSubs = size/sub_size; for (int sub_id = gid; sub_id < totalSubs; sub_id += step_size) { // sub_id is the current for (int index = 0; index < sub_size/4; index++) { int new_index = sub_id * sub_size + sub_size/2 + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; //int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void bitonic_kernel(SORT_T *buffer, int size, int sub_size, int block_per_outer, int sub_size_log) { GET_GID int blockId = blockIdx.x/block_per_outer; int totalBlocks = gridDim.x/block_per_outer; int totalSubs = (size >> sub_size_log); int blockIndex = blockIdx.x % block_per_outer; int warpIdx = blockIndex * blockDim.x + threadIdx.x; int inner_step = blockDim.x * block_per_outer; for (int sub_id = blockId; sub_id < totalSubs; sub_id += totalBlocks) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); index += inner_step) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id <<sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id <<sub_size_log) + (sub_size>>1) +index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_subwarp(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int warpId = (gid >> 3); int totalWarps = (step_size >> 3); int totalSubs = (size>>sub_size_log); int warpIdx = threadIdx.x & 0x7; for (int sub_id = warpId; sub_id < totalSubs; sub_id += 
totalWarps) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); index += 8) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id << sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id << sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_warp(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int warpId = (gid >> 5); int totalWarps = (step_size >> 5); int totalSubs = (size >> sub_size_log); int warpIdx = threadIdx.x & 0x1F; for (int sub_id = warpId; sub_id < totalSubs; sub_id += totalWarps) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); index += 32) { int new_index = (sub_id <<sub_size_log) + index; // int new_index_mirror = (sub_id <<sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id <<sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_thread(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int totalSubs = (size >> sub_size_log); for (int sub_id = gid; sub_id < totalSubs; sub_id += step_size) { // sub_id is the current for (int index = 0; index < (sub_size>>1); index++) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id << sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id << sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; // printf("thread %d read left %d right %d. sub_size %d\n", threadIdx.x, left, right, sub_size); if (left > right) { // printf("swapping \n"); buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } template <int SUB_SIZE, int LOG_SUB_SIZE> __global__ void bitonic_block(SORT_T *buffer, int size) { __shared__ SORT_T s_array[SUB_SIZE]; int numbs = size >> LOG_SUB_SIZE; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { s_array[threadIdx.x] = buffer[(bid << LOG_SUB_SIZE) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << LOG_SUB_SIZE) + blockDim.x + threadIdx.x]; __syncthreads(); int innersize = (SUB_SIZE>>1); int innersize_log = LOG_SUB_SIZE-1; while (innersize >= 1) { // if (threadIdx.x == 0 && blockIdx.x == 0 && (SUB_SIZE>>2) != blockDim.x) printf("here.\n"); // for (int idx = threadIdx.x; idx < (SUB_SIZE>>1); idx += blockDim.x) { int idx = threadIdx.x; int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, idx); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); buffer[(bid << LOG_SUB_SIZE) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << LOG_SUB_SIZE) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; } } __global__ void bitonic_block2_unroll(SORT_T *buffer, int size) { __shared__ SORT_T s_array[2048]; int numbs = size >> 11; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { /* s_array[threadIdx.x] = buffer[(bid << 11) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << 11) + blockDim.x + threadIdx.x]; */ // memcpy_p(s_array, &buffer[bid << 11], 2048); SORT_T *dst = s_array+threadIdx.x; SORT_T *src = &buffer[(bid<<11)+ threadIdx.x]; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; __syncthreads(); int innersize = (2048>>1); int innersize_log = 11-1; while (innersize >= 1) { // for (int idx = threadIdx.x; idx < 1024; idx += blockDim.x) int idx = threadIdx.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); /* buffer[(bid << 11) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << 11) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; */ // memcpy_p(&buffer[bid << 11], s_array, 2048); { SORT_T *dst = &buffer[(bid<<11)+ threadIdx.x]; SORT_T *src = s_array+threadIdx.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; } } } __global__ void bitonic_block2(SORT_T *buffer, int size) { __shared__ SORT_T s_array[2048]; int numbs = size >> 11; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { /* s_array[threadIdx.x] = buffer[(bid << 11) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << 11) + blockDim.x + threadIdx.x]; */ memcpy_p(s_array, &buffer[bid << 11], 2048); /* SORT_T *dst = s_array+threadIdx.x; SORT_T *src = &buffer[(bid<<11)+ threadIdx.x]; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src;*/ __syncthreads(); int innersize = (2048>>1); int innersize_log = 11-1; while (innersize >= 1) { for (int idx = threadIdx.x; idx < 1024; idx += blockDim.x) { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); /* buffer[(bid << 11) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << 11) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; */ memcpy_p(&buffer[bid << 11], s_array, 2048); /* { SORT_T *dst = &buffer[(bid<<11)+ threadIdx.x]; SORT_T *src = s_array+threadIdx.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; }*/ } }
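/*
 * Illustrative sketch (not part of the hipified file above): a small CPU reference
 * of the scheme these kernels implement -- for each doubling sub-block size, reverse
 * the second half of every sub-block (reverse_*) so the block becomes bitonic, then
 * repeatedly compare-exchange element i with element i + half over shrinking halves
 * (bitonic_*). Handy for checking kernel output on the host; the function and
 * variable names here are my own.
 */
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <vector>

void bitonic_sort_ref(std::vector<int>& a) {
    int n = (int)a.size();   // n is assumed to be a power of two
    for (int sub = 2; sub <= n; sub <<= 1) {
        // "reverse" stage: second half of each sub-block is mirrored in place.
        for (int b = 0; b < n; b += sub)
            std::reverse(a.begin() + b + sub / 2, a.begin() + b + sub);
        // "bitonic" stages: monotone compare-exchange, halving the span each pass.
        for (int inner = sub; inner >= 2; inner >>= 1)
            for (int b = 0; b < n; b += inner)
                for (int i = 0; i < inner / 2; ++i)
                    if (a[b + i] > a[b + i + inner / 2])
                        std::swap(a[b + i], a[b + i + inner / 2]);
    }
}

int main() {
    std::vector<int> a(1 << 12);
    for (int& v : a) v = std::rand() % 1000;
    std::vector<int> ref = a;
    bitonic_sort_ref(a);
    std::sort(ref.begin(), ref.end());
    std::printf("%s\n", a == ref ? "bitonic reference matches std::sort" : "MISMATCH");
    return 0;
}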
0ac5ff4278cb11faa31f6fe3a4ba22565729e868.cu
template <class T> __device__ void memcpy_p(T *dst, const T *src, int size) { for (int idx = threadIdx.x; idx < size; idx += blockDim.x) dst[idx] = src[idx]; } __global__ void reverse_kernel(SORT_T *buffer, int size, int sub_size, int block_per_outer) { GET_GID int blockId = blockIdx.x/block_per_outer; int totalWarps = (step_size >> 5); int totalBlocks = gridDim.x/block_per_outer; int totalSubs = size/sub_size; int blockIndex = blockIdx.x % block_per_outer; int warpIdx = blockIndex * blockDim.x + threadIdx.x; int inner_step = blockDim.x * block_per_outer; for (int sub_id = blockId; sub_id < totalSubs; sub_id += totalBlocks) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>2); index += inner_step) { int new_index = sub_id * sub_size + (sub_size>>1) + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; // int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void reverse_warp(SORT_T *buffer, int size, int sub_size) { GET_GID int warpId = (gid >> 5); int totalWarps = (step_size >> 5); int totalSubs = size/sub_size; int warpIdx = threadIdx.x & 0x1F; for (int sub_id = warpId; sub_id < totalSubs; sub_id += totalWarps) { // sub_id is the current for (int index = warpIdx; index < sub_size/4; index += 32) { int new_index = sub_id * sub_size + sub_size/2 + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; //int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void reverse_thread(SORT_T *buffer, int size, int sub_size) { GET_GID int totalSubs = size/sub_size; for (int sub_id = gid; sub_id < totalSubs; sub_id += step_size) { // sub_id is the current for (int index = 0; index < sub_size/4; index++) { int new_index = sub_id * sub_size + sub_size/2 + index; int new_index_mirror = sub_id * sub_size + sub_size - index-1; //int new_index_mirror = sub_id * sub_size + (sub_size>>1) + index; SORT_T tmp = buffer[new_index]; buffer[new_index] = buffer[new_index_mirror]; buffer[new_index_mirror] = tmp; } } } __global__ void bitonic_kernel(SORT_T *buffer, int size, int sub_size, int block_per_outer, int sub_size_log) { GET_GID int blockId = blockIdx.x/block_per_outer; int totalBlocks = gridDim.x/block_per_outer; int totalSubs = (size >> sub_size_log); int blockIndex = blockIdx.x % block_per_outer; int warpIdx = blockIndex * blockDim.x + threadIdx.x; int inner_step = blockDim.x * block_per_outer; for (int sub_id = blockId; sub_id < totalSubs; sub_id += totalBlocks) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); index += inner_step) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id <<sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id <<sub_size_log) + (sub_size>>1) +index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_subwarp(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int warpId = (gid >> 3); int totalWarps = (step_size >> 3); int totalSubs = (size>>sub_size_log); int warpIdx = threadIdx.x & 0x7; for (int sub_id = warpId; sub_id < totalSubs; sub_id += totalWarps) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); 
index += 8) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id << sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id << sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_warp(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int warpId = (gid >> 5); int totalWarps = (step_size >> 5); int totalSubs = (size >> sub_size_log); int warpIdx = threadIdx.x & 0x1F; for (int sub_id = warpId; sub_id < totalSubs; sub_id += totalWarps) { // sub_id is the current for (int index = warpIdx; index < (sub_size>>1); index += 32) { int new_index = (sub_id <<sub_size_log) + index; // int new_index_mirror = (sub_id <<sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id <<sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; if (left > right) { buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } __global__ void bitonic_thread(SORT_T *buffer, int size, int sub_size, int sub_size_log) { GET_GID int totalSubs = (size >> sub_size_log); for (int sub_id = gid; sub_id < totalSubs; sub_id += step_size) { // sub_id is the current for (int index = 0; index < (sub_size>>1); index++) { int new_index = (sub_id << sub_size_log) + index; // int new_index_mirror = (sub_id << sub_size_log) + sub_size - index-1; int new_index_mirror = (sub_id << sub_size_log) + (sub_size>>1) + index; SORT_T left = buffer[new_index]; SORT_T right = buffer[new_index_mirror]; // printf("thread %d read left %d right %d. sub_size %d\n", threadIdx.x, left, right, sub_size); if (left > right) { // printf("swapping \n"); buffer[new_index] = right; buffer[new_index_mirror] = left; } } } } template <int SUB_SIZE, int LOG_SUB_SIZE> __global__ void bitonic_block(SORT_T *buffer, int size) { __shared__ SORT_T s_array[SUB_SIZE]; int numbs = size >> LOG_SUB_SIZE; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { s_array[threadIdx.x] = buffer[(bid << LOG_SUB_SIZE) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << LOG_SUB_SIZE) + blockDim.x + threadIdx.x]; __syncthreads(); int innersize = (SUB_SIZE>>1); int innersize_log = LOG_SUB_SIZE-1; while (innersize >= 1) { // if (threadIdx.x == 0 && blockIdx.x == 0 && (SUB_SIZE>>2) != blockDim.x) printf("here.\n"); // for (int idx = threadIdx.x; idx < (SUB_SIZE>>1); idx += blockDim.x) { int idx = threadIdx.x; int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, idx); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); buffer[(bid << LOG_SUB_SIZE) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << LOG_SUB_SIZE) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; } } __global__ void bitonic_block2_unroll(SORT_T *buffer, int size) { __shared__ SORT_T s_array[2048]; int numbs = size >> 11; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { /* s_array[threadIdx.x] = buffer[(bid << 11) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << 11) + blockDim.x + threadIdx.x]; */ // memcpy_p(s_array, &buffer[bid << 11], 2048); SORT_T *dst = s_array+threadIdx.x; SORT_T *src = &buffer[(bid<<11)+ threadIdx.x]; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; __syncthreads(); int innersize = (2048>>1); int innersize_log = 11-1; while (innersize >= 1) { // for (int idx = threadIdx.x; idx < 1024; idx += blockDim.x) int idx = threadIdx.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } idx += blockDim.x; { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); /* buffer[(bid << 11) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << 11) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; */ // memcpy_p(&buffer[bid << 11], s_array, 2048); { SORT_T *dst = &buffer[(bid<<11)+ threadIdx.x]; SORT_T *src = s_array+threadIdx.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; } } } __global__ void bitonic_block2(SORT_T *buffer, int size) { __shared__ SORT_T s_array[2048]; int numbs = size >> 11; for (int bid = blockIdx.x; bid < numbs; bid += gridDim.x) { /* s_array[threadIdx.x] = buffer[(bid << 11) + threadIdx.x]; s_array[blockDim.x + threadIdx.x] = buffer[(bid << 11) + blockDim.x + threadIdx.x]; */ memcpy_p(s_array, &buffer[bid << 11], 2048); /* SORT_T *dst = s_array+threadIdx.x; SORT_T *src = &buffer[(bid<<11)+ threadIdx.x]; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src;*/ __syncthreads(); int innersize = (2048>>1); int innersize_log = 11-1; while (innersize >= 1) { for (int idx = threadIdx.x; idx < 1024; idx += blockDim.x) { int subid = (idx>>innersize_log); ///innersize; int subindex = idx & (innersize-1); //% innersize; int index = (subid << (innersize_log+1)) + subindex; // int index_mirror = (subid << (innersize_log+1)) + (innersize<<1) - subindex -1; int index_mirror = (subid << (innersize_log+1)) + innersize + subindex; // printf("subid %d. 
subindex %d.index %d mirror %d threadIdx %d.\n", subid, subindex, index, index_mirror, threadIdx.x); SORT_T left = s_array[index]; SORT_T right = s_array[index_mirror]; if (left > right) { s_array[index] = right; s_array[index_mirror] = left; } } innersize >>= 1; innersize_log -= 1; __syncthreads(); } __syncthreads(); /* buffer[(bid << 11) + threadIdx.x] = s_array[threadIdx.x]; buffer[(bid << 11) + blockDim.x + threadIdx.x] = s_array[blockDim.x + threadIdx.x]; */ memcpy_p(&buffer[bid << 11], s_array, 2048); /* { SORT_T *dst = &buffer[(bid<<11)+ threadIdx.x]; SORT_T *src = s_array+threadIdx.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; dst += blockDim.x; src += blockDim.x; *dst = *src; }*/ } }
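/*
 * Illustrative sketch (not part of the file above): a hypothetical host driver for
 * the kernels defined above, since the launch code is not included here. It assumes
 * SORT_T, GET_GID and step_size come from a header that is not shown, that the
 * buffer length is a power of two, and it always uses the *_warp variants for
 * brevity -- a real driver would presumably switch between the thread/subwarp/warp/
 * block variants (e.g. bitonic_block for small sub-sizes) based on sub-problem size.
 */
void bitonic_sort_device(SORT_T* d_buf, int size) {
    const int threads = 256;
    const int blocks  = 120;
    int sub_log = 1;
    for (int sub = 2; sub <= size; sub <<= 1, ++sub_log) {
        // Reverse the second half of every sub-block so it becomes a bitonic run
        // (a no-op for sub == 2, where each half is a single element).
        reverse_warp<<<blocks, threads>>>(d_buf, size, sub);
        // Monotone compare-exchange passes over shrinking halves.
        int inner_log = sub_log;
        for (int inner = sub; inner >= 2; inner >>= 1, --inner_log)
            bitonic_warp<<<blocks, threads>>>(d_buf, size, inner, inner_log);
        // Launches on the default stream serialise, so each stage sees the
        // completed result of the previous one.
    }
    cudaDeviceSynchronize();
}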
42c5495a0dbb498d67bd495fc75934ffb3103308.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Test on cuda::MultiDimArray class. * * Author: Ying Xiong. * Created: Mar 20, 2015. */ #include "cuda/MultiDimArray.cuh" #include "LogAndCheck.h" #include "MultiDimArray.h" #include "Timer.h" using namespace xyUtils; using MDA_32f3 = MultiDimArray<float,3>; using CMDA_32f3 = cuda::MultiDimArray<float,3>; __global__ void AddMultiDimArrays(const CMDA_32f3 a, const CMDA_32f3 b, CMDA_32f3 r) { for (int x = threadIdx.x; x < a.GetDim(0); x += blockDim.x) { for (int y = threadIdx.y; y < a.GetDim(1); y += blockDim.y) { for (int z = threadIdx.z; z < a.GetDim(2); z += blockDim.z) { r(x,y,z) = a(x,y,z) + b(x,y,z); } } } } int main() { Timer timer; LOG(INFO) << "Test on cuda::MultiDimArray class..."; int dims[3] = {30, 40, 20}; MDA_32f3 a(dims); MDA_32f3 b(dims); a(10, 20, 4) = -100; b(20, 30, 5) = 200; CMDA_32f3 d_a = cuda::CopyMultiDimArrayToDevice(a); CMDA_32f3 d_b = cuda::CopyMultiDimArrayToDevice(b); CMDA_32f3 d_c = cuda::CreateMultiDimArrayOnDevice<float,3>(dims); dim3 blockSize(8,8,2); hipLaunchKernelGGL(( AddMultiDimArrays), dim3(1),dim3(blockSize), 0, 0, d_a, d_b, d_c); MDA_32f3 c(dims); d_c.CopyToHost(&c); for (int i = 0; i < dims[0]; ++i) { for (int j = 0; j < dims[1]; ++j) { for (int k = 0; k < dims[2]; ++k) { CHECK_NEAR(c(i,j,k), a(i,j,k)+b(i,j,k), 0.001); } } } d_a.FreeOnDevice(); d_b.FreeOnDevice(); d_c.FreeOnDevice(); LOG(INFO) << "Passed. [" << timer.elapsed() << " seconds]"; return 0; }
42c5495a0dbb498d67bd495fc75934ffb3103308.cu
/** * Test on cuda::MultiDimArray class. * * Author: Ying Xiong. * Created: Mar 20, 2015. */ #include "cuda/MultiDimArray.cuh" #include "LogAndCheck.h" #include "MultiDimArray.h" #include "Timer.h" using namespace xyUtils; using MDA_32f3 = MultiDimArray<float,3>; using CMDA_32f3 = cuda::MultiDimArray<float,3>; __global__ void AddMultiDimArrays(const CMDA_32f3 a, const CMDA_32f3 b, CMDA_32f3 r) { for (int x = threadIdx.x; x < a.GetDim(0); x += blockDim.x) { for (int y = threadIdx.y; y < a.GetDim(1); y += blockDim.y) { for (int z = threadIdx.z; z < a.GetDim(2); z += blockDim.z) { r(x,y,z) = a(x,y,z) + b(x,y,z); } } } } int main() { Timer timer; LOG(INFO) << "Test on cuda::MultiDimArray class..."; int dims[3] = {30, 40, 20}; MDA_32f3 a(dims); MDA_32f3 b(dims); a(10, 20, 4) = -100; b(20, 30, 5) = 200; CMDA_32f3 d_a = cuda::CopyMultiDimArrayToDevice(a); CMDA_32f3 d_b = cuda::CopyMultiDimArrayToDevice(b); CMDA_32f3 d_c = cuda::CreateMultiDimArrayOnDevice<float,3>(dims); dim3 blockSize(8,8,2); AddMultiDimArrays<<<1,blockSize>>>(d_a, d_b, d_c); MDA_32f3 c(dims); d_c.CopyToHost(&c); for (int i = 0; i < dims[0]; ++i) { for (int j = 0; j < dims[1]; ++j) { for (int k = 0; k < dims[2]; ++k) { CHECK_NEAR(c(i,j,k), a(i,j,k)+b(i,j,k), 0.001); } } } d_a.FreeOnDevice(); d_b.FreeOnDevice(); d_c.FreeOnDevice(); LOG(INFO) << "Passed. [" << timer.elapsed() << " seconds]"; return 0; }
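/*
 * Illustrative sketch (not part of the test above): the test relies on LogAndCheck.h
 * and never inspects the launch status of AddMultiDimArrays, so a bad launch
 * configuration would only surface indirectly through CHECK_NEAR. A minimal,
 * self-contained check helper looks like this; the CUDA_CHECK name is my own and
 * is not part of xyUtils.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                        \
    do {                                                                        \
        cudaError_t err__ = (call);                                             \
        if (err__ != cudaSuccess) {                                             \
            std::fprintf(stderr, "CUDA error '%s' at %s:%d\n",                  \
                         cudaGetErrorString(err__), __FILE__, __LINE__);        \
            std::exit(EXIT_FAILURE);                                            \
        }                                                                       \
    } while (0)

// Typical use around an asynchronous kernel launch:
//   AddMultiDimArrays<<<1, blockSize>>>(d_a, d_b, d_c);
//   CUDA_CHECK(cudaGetLastError());       // reports invalid launch configurations
//   CUDA_CHECK(cudaDeviceSynchronize());  // reports errors raised during execution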
1aa3be3a133602141539e699f580bca544b8f8f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <omp.h> #include <stdio.h> //using namespace std; Weird //This is a CPU helper kernel for hybrid setting template <class T> double cpu_perman64(T* mat_t, double x[], int nov, long long start, long long end, int threads) { double p = 0; //product of the elements in vector 'x' long long one = 1; long long chunk_size = (end - start) / threads + 1; omp_set_num_threads(threads); #pragma omp parallel { double my_x[nov]; for (int i = 0; i < nov; i++) { my_x[i] = x[i]; } int tid = omp_get_thread_num(); long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); double *xptr; int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not xptr = (double*)my_x; for (int j = 0; j < nov; j++) { *xptr += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries xptr++; } } } int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { //compute the gray code k = __builtin_ctzll(i); gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; xptr = (double*)my_x; for (int j = 0; j < nov; j++) { *xptr += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= *xptr++; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } #pragma omp atomic p += my_p; } return p; } template <class T> __global__ void kernel_xglobal(T* mat_t, float* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { x[tid*nov + j] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 
1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { x[tid*nov + j] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= x[tid*nov + j]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xlocal(T* mat_t, double* x, double* p, int nov) { float my_x[40]; for (int k = 0; k < nov; k++) { my_x[k] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; int tid = threadIdx.x + (blockIdx.x * blockDim.x); long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); float *xptr; int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not xptr = (float*)my_x; for (int j = 0; j < nov; j++) { *xptr += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries xptr++; } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; xptr = (float*)my_x; for (int j = 0; j < nov; j++) { *xptr += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= *xptr++; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared(T* mat_t, double* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE for (int k = 0; k < nov; k++) { my_x[thread_id*nov + k] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[thread_id*nov + j] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 
1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[thread_id*nov + j] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[thread_id*nov + j]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared_coalescing(T* mat_t, double* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; int block_dim = blockDim.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE for (int k = 0; k < nov; k++) { my_x[block_dim*k + thread_id] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared_coalescing_mshared(T* mat_t, double* x, double* p, int nov, long long start, long long end) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; int block_dim = blockDim.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE T *shared_mat_t = (T*) &my_x[nov * block_dim]; // size = nov * nov for (int k = 0; k < nov; k++) { my_x[block_dim*k + thread_id] = x[k]; } for (int k = 0; k < ((nov*nov)/block_dim + 1); k++) { if ((block_dim * k + thread_id) < (nov * nov)) shared_mat_t[block_dim * k + thread_id] = mat_t[block_dim * k + thread_id]; } __syncthreads(); long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long chunk_size = (end - start) / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += shared_mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if (i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code 
order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += s * shared_mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> double gpu_perman64_xglobal(T* mat, int nov, int grid_dim, int block_dim) { float x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } float *h_x = new float[nov*grid_dim*block_dim]; for (int i = 0; i < nov*grid_dim*block_dim; i++) { h_x[i] = x[i%nov]; } hipSetDevice(1); T *d_mat_t; float *d_x; double *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov*grid_dim*block_dim) * sizeof(float)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, h_x, (nov*grid_dim*block_dim) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); hipLaunchKernelGGL(( kernel_xglobal), dim3(grid_dim) , dim3(block_dim) , 0, 0, d_mat_t, d_x, d_p, nov); hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete [] h_x; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xlocal(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } hipSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); hipLaunchKernelGGL(( kernel_xlocal), dim3(grid_dim) , dim3(block_dim) , 0, 0, d_mat_t, d_x, d_p, nov); hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; 
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } hipSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); hipLaunchKernelGGL(( kernel_xshared), dim3(grid_dim) , dim3(block_dim) , nov*block_dim*sizeof(float) , 0, d_mat_t, d_x, d_p, nov); hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } hipSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); hipLaunchKernelGGL(( kernel_xshared_coalescing), dim3(grid_dim) , dim3(block_dim) , nov*block_dim*sizeof(float) , 0, d_mat_t, d_x, d_p, nov); hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double 
gpu_perman64_xshared_coalescing_mshared(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } hipSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); long long start = 1; long long end = (1LL << (nov-1)); double stt = omp_get_wtime(); hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, start, end); hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpu(T* mat, int nov, int gpu_num, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num]; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p_partial[gpu_id] = 0; } //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / gpu_num; #pragma omp parallel for num_threads(gpu_num) for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { hipSetDevice(gpu_id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); if (gpu_id == gpu_num-1) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, (start + gpu_id*offset), end); } else { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, 
(start + gpu_id*offset), (start + (gpu_id+1)*offset)); } hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << gpu_id << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[gpu_id] += h_p[i]; } delete[] h_p; } delete [] mat_t; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p += p_partial[gpu_id]; } return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks(T* mat, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num+1]; for (int id = 0; id < gpu_num+1; id++) { p_partial[id] = 0; } int number_of_chunks = 1; int init = 29; if (cpu) { init = 28; } for (int i = init; i < nov; i++) { number_of_chunks *= 2; } int chunk_id = 0; //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / number_of_chunks; omp_set_nested(1); omp_set_dynamic(0); #pragma omp parallel for num_threads(gpu_num+1) for (int id = 0; id < gpu_num+1; id++) { if (id == gpu_num) { if (cpu) { int curr_chunk_id; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } while (curr_chunk_id < number_of_chunks) { double stt = omp_get_wtime(); if (curr_chunk_id == number_of_chunks - 1) { p_partial[id] += cpu_perman64(mat_t, x, nov, (start + curr_chunk_id*offset), end, threads); } else { p_partial[id] += cpu_perman64(mat_t, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads); } double enn = omp_get_wtime(); printf("ChunkID %d is DONE by CPU in %f \n", curr_chunk_id, enn - stt); //cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } } } } else { hipSetDevice(id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); int curr_chunk_id; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } while (curr_chunk_id < number_of_chunks) { double stt = omp_get_wtime(); if (curr_chunk_id == number_of_chunks - 1) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, (start + curr_chunk_id*offset), end); } else { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset)); } 
hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("ChunkID %d is DONE by kernel %d in %f \n", curr_chunk_id, id, enn - stt); //cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[id] += h_p[i]; } #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } } hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); delete[] h_p; } } delete [] mat_t; for (int id = 0; id < gpu_num+1; id++) { p += p_partial[id]; } return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpu_manual_distribution(T* mat, int nov, int gpu_num, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num]; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p_partial[gpu_id] = 0; } //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / 8; #pragma omp parallel for num_threads(gpu_num) for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { hipSetDevice(gpu_id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; hipMalloc( &d_x, (nov) * sizeof(double)); hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); hipMalloc( &d_mat_t, (nov * nov) * sizeof(T)); hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice); hipMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), hipMemcpyHostToDevice); double stt = omp_get_wtime(); if (gpu_id == 0) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, start, start + 3*offset); } else if (gpu_id == 1) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, start + 3*offset, start + 6*offset); } else if (gpu_id == 2) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, start + 6*offset, start + 7*offset); } else if (gpu_id == 3) { hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) , 0, d_mat_t, d_x, d_p, nov, start + 7*offset, end); } hipDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << gpu_id << " in " << (enn - stt) << endl; hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_mat_t); hipFree(d_x); hipFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[gpu_id] += h_p[i]; } delete[] h_p; } delete [] mat_t; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p += p_partial[gpu_id]; } return((4*(nov&1)-2) * p); }
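// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the .hip file above). All of the kernels
// above parallelise the Nijenhuis-Wilf Gray-code recurrence for the matrix
// permanent; the single-threaded version below folds the host-side x-vector
// initialisation and the signed-product loop into one function so the
// recurrence is easier to read. Like the kernels, it takes the transposed
// matrix mat_t, and it assumes n <= 63 so that 2^(n-1) fits in a long long
// (the stack array caps n at 64).
// ---------------------------------------------------------------------------
#include <cstdio>

double perman_gray(const double* mat_t, int n)   // mat_t is the n*n transpose of the input
{
  double x[64];
  double p = 1.0;
  for (int j = 0; j < n; j++) {                  // x[j] = a[j][n-1] - (row sum of row j)/2
    double rs = 0.0;
    for (int k = 0; k < n; k++) rs += mat_t[k * n + j];
    x[j] = mat_t[(n - 1) * n + j] - rs / 2.0;
    p *= x[j];                                   // i = 0 term of the sum
  }
  long long gray = 0, one = 1;
  int sign = -1;                                 // sign of the i-th term, starting at i = 1
  for (long long i = 1; i < (one << (n - 1)); i++) {
    int k = __builtin_ctzll(i);                  // bit that flips in the Gray code at step i
    gray ^= (one << k);
    int s = ((gray >> k) & 1) ? 1 : -1;          // add or subtract column k of the matrix
    double prod = 1.0;
    for (int j = 0; j < n; j++) {
      x[j] += s * mat_t[k * n + j];
      prod *= x[j];
    }
    p += sign * prod;
    sign = -sign;
  }
  return (4 * (n & 1) - 2) * p;                  // same final scaling as the wrappers above
}

int main()
{
  const int n = 3;                               // permanent of the all-ones 3x3 matrix is 3! = 6
  double a[n * n] = {1,1,1, 1,1,1, 1,1,1};
  printf("perman = %f (expect 6)\n", perman_gray(a, n));
  return 0;
}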
1aa3be3a133602141539e699f580bca544b8f8f3.cu
#include <omp.h> #include <stdio.h> //using namespace std; Weird //This is a CPU helper kernel for hybrid setting template <class T> double cpu_perman64(T* mat_t, double x[], int nov, long long start, long long end, int threads) { double p = 0; //product of the elements in vector 'x' long long one = 1; long long chunk_size = (end - start) / threads + 1; omp_set_num_threads(threads); #pragma omp parallel { double my_x[nov]; for (int i = 0; i < nov; i++) { my_x[i] = x[i]; } int tid = omp_get_thread_num(); long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); double *xptr; int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not xptr = (double*)my_x; for (int j = 0; j < nov; j++) { *xptr += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries xptr++; } } } int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { //compute the gray code k = __builtin_ctzll(i); gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; xptr = (double*)my_x; for (int j = 0; j < nov; j++) { *xptr += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= *xptr++; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } #pragma omp atomic p += my_p; } return p; } template <class T> __global__ void kernel_xglobal(T* mat_t, float* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { x[tid*nov + j] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 
1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { x[tid*nov + j] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= x[tid*nov + j]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xlocal(T* mat_t, double* x, double* p, int nov) { float my_x[40]; for (int k = 0; k < nov; k++) { my_x[k] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; int tid = threadIdx.x + (blockIdx.x * blockDim.x); long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); float *xptr; int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not xptr = (float*)my_x; for (int j = 0; j < nov; j++) { *xptr += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries xptr++; } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; xptr = (float*)my_x; for (int j = 0; j < nov; j++) { *xptr += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= *xptr++; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared(T* mat_t, double* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE for (int k = 0; k < nov; k++) { my_x[thread_id*nov + k] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[thread_id*nov + j] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 
1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[thread_id*nov + j] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[thread_id*nov + j]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared_coalescing(T* mat_t, double* x, double* p, int nov) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; int block_dim = blockDim.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE for (int k = 0; k < nov; k++) { my_x[block_dim*k + thread_id] = x[k]; } long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long start = 1; long long end = (1LL << (nov-1)); long long chunk_size = end / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if(i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += s * mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> __global__ void kernel_xshared_coalescing_mshared(T* mat_t, double* x, double* p, int nov, long long start, long long end) { int tid = threadIdx.x + (blockIdx.x * blockDim.x); int thread_id = threadIdx.x; int block_dim = blockDim.x; extern __shared__ float shared_mem[]; float *my_x = shared_mem; // size = nov * BLOCK_SIZE T *shared_mat_t = (T*) &my_x[nov * block_dim]; // size = nov * nov for (int k = 0; k < nov; k++) { my_x[block_dim*k + thread_id] = x[k]; } for (int k = 0; k < ((nov*nov)/block_dim + 1); k++) { if ((block_dim * k + thread_id) < (nov * nov)) shared_mat_t[block_dim * k + thread_id] = mat_t[block_dim * k + thread_id]; } __syncthreads(); long long number_of_threads = blockDim.x * gridDim.x; long long one = 1; long long chunk_size = (end - start) / number_of_threads + 1; long long my_start = start + tid * chunk_size; long long my_end = min(start + ((tid+1) * chunk_size), end); int s; //+1 or -1 double prod; //product of the elements in vector 'x' double my_p = 0; long long i = my_start; long long gray = (i-1) ^ ((i-1) >> 1); for (int k = 0; k < (nov-1); k++) { if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += shared_mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries } } } long long gray_diff; int k; int prodSign = 1; if (i & 1LL) { prodSign = -1; } while (i < my_end) { gray_diff = (i ^ (i >> 1)) ^ gray; k = __ffsll(gray_diff) - 1; gray ^= (one << k); // Gray-code 
order: 1,3,2,6,7,5,4,12,13,15,... //decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1 s = ((one << k) & gray) ? 1 : -1; prod = 1.0; for (int j = 0; j < nov; j++) { my_x[block_dim*j + thread_id] += s * shared_mat_t[(k * nov) + j]; // see Nijenhuis and Wilf - update x vector entries prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x' } my_p += prodSign * prod; prodSign *= -1; i++; } p[tid] = my_p; } template <class T> double gpu_perman64_xglobal(T* mat, int nov, int grid_dim, int block_dim) { float x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } float *h_x = new float[nov*grid_dim*block_dim]; for (int i = 0; i < nov*grid_dim*block_dim; i++) { h_x[i] = x[i%nov]; } cudaSetDevice(1); T *d_mat_t; float *d_x; double *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov*grid_dim*block_dim) * sizeof(float)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, h_x, (nov*grid_dim*block_dim) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); kernel_xglobal<<< grid_dim , block_dim >>> (d_mat_t, d_x, d_p, nov); cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete [] h_x; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xlocal(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } cudaSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); kernel_xlocal<<< grid_dim , block_dim >>> (d_mat_t, d_x, d_p, nov); cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * 
sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } cudaSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); kernel_xshared<<< grid_dim , block_dim , nov*block_dim*sizeof(float) >>> (d_mat_t, d_x, d_p, nov); cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } cudaSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); kernel_xshared_coalescing<<< grid_dim , block_dim , nov*block_dim*sizeof(float) >>> (d_mat_t, d_x, d_p, nov); cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared(T* mat, int nov, int grid_dim, int block_dim) { double x[nov]; 
double rs; //row sum double p = 1; //product of the elements in vector 'x' //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } cudaSetDevice(1); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); long long start = 1; long long end = (1LL << (nov-1)); double stt = omp_get_wtime(); kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, start, end); cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p += h_p[i]; } delete [] mat_t; delete[] h_p; return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpu(T* mat, int nov, int gpu_num, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num]; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p_partial[gpu_id] = 0; } //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / gpu_num; #pragma omp parallel for num_threads(gpu_num) for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { cudaSetDevice(gpu_id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); if (gpu_id == gpu_num-1) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, (start + gpu_id*offset), end); } else { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, (start + gpu_id*offset), (start + (gpu_id+1)*offset)); } cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << gpu_id 
<< " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[gpu_id] += h_p[i]; } delete[] h_p; } delete [] mat_t; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p += p_partial[gpu_id]; } return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks(T* mat, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num+1]; for (int id = 0; id < gpu_num+1; id++) { p_partial[id] = 0; } int number_of_chunks = 1; int init = 29; if (cpu) { init = 28; } for (int i = init; i < nov; i++) { number_of_chunks *= 2; } int chunk_id = 0; //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / number_of_chunks; omp_set_nested(1); omp_set_dynamic(0); #pragma omp parallel for num_threads(gpu_num+1) for (int id = 0; id < gpu_num+1; id++) { if (id == gpu_num) { if (cpu) { int curr_chunk_id; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } while (curr_chunk_id < number_of_chunks) { double stt = omp_get_wtime(); if (curr_chunk_id == number_of_chunks - 1) { p_partial[id] += cpu_perman64(mat_t, x, nov, (start + curr_chunk_id*offset), end, threads); } else { p_partial[id] += cpu_perman64(mat_t, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads); } double enn = omp_get_wtime(); printf("ChunkID %d is DONE by CPU in %f \n", curr_chunk_id, enn - stt); //cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } } } } else { cudaSetDevice(id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); int curr_chunk_id; #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } while (curr_chunk_id < number_of_chunks) { double stt = omp_get_wtime(); if (curr_chunk_id == number_of_chunks - 1) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, (start + curr_chunk_id*offset), end); } else { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset)); } cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("ChunkID %d is DONE by kernel %d in %f \n", curr_chunk_id, id, enn - stt); //cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << 
endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[id] += h_p[i]; } #pragma omp critical { curr_chunk_id = chunk_id; chunk_id++; } } cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); delete[] h_p; } } delete [] mat_t; for (int id = 0; id < gpu_num+1; id++) { p += p_partial[id]; } return((4*(nov&1)-2) * p); } template <class T> double gpu_perman64_xshared_coalescing_mshared_multigpu_manual_distribution(T* mat, int nov, int gpu_num, int grid_dim, int block_dim) { double x[nov]; double rs; //row sum double p = 1; //product of the elements in vector 'x' double p_partial[gpu_num]; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p_partial[gpu_id] = 0; } //create the x vector and initiate the permanent for (int j = 0; j < nov; j++) { rs = .0f; for (int k = 0; k < nov; k++) { rs += mat[(j * nov) + k]; // sum of row j } x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry p *= x[j]; // product of the elements in vector 'x' } //create the transpose of the matrix T* mat_t = new T[nov * nov]; for (int i = 0; i < nov; i++) { for (int j = 0; j < nov; j++) { mat_t[(i * nov) + j] = mat[(j * nov) + i]; } } long long start = 1; long long end = (1LL << (nov-1)); long long offset = (end - start) / 8; #pragma omp parallel for num_threads(gpu_num) for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { cudaSetDevice(gpu_id); T *d_mat_t; double *d_x, *d_p; double *h_p = new double[grid_dim * block_dim]; cudaMalloc( &d_x, (nov) * sizeof(double)); cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double)); cudaMalloc( &d_mat_t, (nov * nov) * sizeof(T)); cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy( d_mat_t, mat_t, (nov * nov) * sizeof(T), cudaMemcpyHostToDevice); double stt = omp_get_wtime(); if (gpu_id == 0) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, start, start + 3*offset); } else if (gpu_id == 1) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, start + 3*offset, start + 6*offset); } else if (gpu_id == 2) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, start + 6*offset, start + 7*offset); } else if (gpu_id == 3) { kernel_xshared_coalescing_mshared<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + nov*nov*sizeof(T)) >>> (d_mat_t, d_x, d_p, nov, start + 7*offset, end); } cudaDeviceSynchronize(); double enn = omp_get_wtime(); printf("Kernel in %f \n", enn - stt); //cout << "kernel" << gpu_id << " in " << (enn - stt) << endl; cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_mat_t); cudaFree(d_x); cudaFree(d_p); for (int i = 0; i < grid_dim * block_dim; i++) { p_partial[gpu_id] += h_p[i]; } delete[] h_p; } delete [] mat_t; for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) { p += p_partial[gpu_id]; } return((4*(nov&1)-2) * p); }
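// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the .cu file above). The *_mshared kernels
// request nov*block_dim floats of dynamic shared memory for the per-thread x
// slices plus nov*nov elements for the cached transposed matrix, but none of
// the host wrappers verify that this budget fits the device limit. The helper
// below is an assumed addition that performs that check; the sizes used in
// main are chosen purely for illustration.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

bool shared_mem_fits(int nov, int block_dim, size_t elem_size, int device = 0)
{
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, device);
  size_t need = (size_t)nov * block_dim * sizeof(float)   // per-thread x slices
              + (size_t)nov * nov * elem_size;            // shared copy of mat_t
  printf("need %zu bytes of dynamic shared memory, device limit %zu\n",
         need, prop.sharedMemPerBlock);
  return need <= prop.sharedMemPerBlock;
}

int main()
{
  // e.g. a 36x36 int matrix with 256 threads per block
  if (!shared_mem_fits(36, 256, sizeof(int)))
    printf("reduce block_dim or fall back to a kernel that keeps mat_t in global memory\n");
  return 0;
}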
bc9700e999549640f1716cdf598924d29d259ad4.hip
// !!! This is a file automatically generated by hipify!!! /** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #define POLYBENCH_TIME 1 #include "gemm_hip.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void gemm(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i,j,k; for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { C[i][j] *= beta; for (k = 0; k < _PB_NK; ++k) { C[i][j] += alpha * A[i][k] * B[k][j]; } } } } void init(int ni, int nj, int nk, DATA_TYPE* alpha, DATA_TYPE* beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i, j; *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) { for (j = 0; j < nk; j++) { A[i][j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < nk; i++) { for (j = 0; j < nj; j++) { B[i][j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < ni; i++) { for (j = 0; j < nj; j++) { C[i][j] = ((DATA_TYPE) i*j) / NI; } } } void compareResults(int ni, int nj, DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(C_outputFromGpu,NI,NJ,ni,nj)) { int i, j, fail; fail = 0; // Compare CPU and GPU outputs for (i=0; i < ni; i++) { for (j=0; j < nj; j++) { if (percentDiff(C[i][j], C_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < _PB_NI) && (j < _PB_NJ)) { c[i * NJ + j] *= beta; int k; for(k=0; k < _PB_NK; k++) { c[i * NJ + j] += alpha * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(C_outputFromGpu,NI,NJ,ni,nj)) { DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)(ceil( ((float)NI)/ ((float)block.x) )),(size_t)(ceil( ((float)NJ)/ ((float)block.y) ))); /* Start timer. 
*/ polybench_start_instruments; hipLaunchKernelGGL(( gemm_kernel), dim3(grid), dim3(block) , 0, 0, ni, nj, nk, alpha, beta, A_gpu, B_gpu, C_gpu); hipDeviceSynchronize(); /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } int main(int argc, char *argv[]) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NI,NK,ni,nk); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,NK,NJ,nk,nj); POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,NI,NJ,ni,nj); POLYBENCH_2D_ARRAY_DECL(C_outputFromGpu,DATA_TYPE,NI,NJ,ni,nj); init(ni, nj, nk, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C)); GPU_argv_init(); gemmCuda(ni, nj, nk, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; gemm(ni, nj, nk, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(ni, nj, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(C_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(C_outputFromGpu); return 0; } #include <polybench.c>
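// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the .hip file above). gemmCuda sizes its
// grid with float ceil() from (NI, NJ) in that order, while the kernel maps
// threadIdx.x to the column index j (bounded by NJ) and threadIdx.y to the
// row index i (bounded by NI); the two agree only because NI == NJ for the
// PolyBench default sizes. The helper below is an assumed alternative that
// uses integer ceil-division and pairs each grid extent with the matching
// matrix dimension.
// ---------------------------------------------------------------------------
#include <cstdio>

struct Launch { unsigned gx, gy, bx, by; };

Launch gemm_launch(int ni, int nj, int bx, int by)
{
  Launch l;
  l.bx = bx;
  l.by = by;
  l.gx = (nj + bx - 1) / bx;   // columns j = 0..nj-1 run along x
  l.gy = (ni + by - 1) / by;   // rows    i = 0..ni-1 run along y
  return l;
}

int main()
{
  Launch l = gemm_launch(512, 512, 32, 8);   // sizes chosen for illustration only
  printf("grid (%u,%u), block (%u,%u)\n", l.gx, l.gy, l.bx, l.by);
  return 0;
}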
bc9700e999549640f1716cdf598924d29d259ad4.cu
/** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #define POLYBENCH_TIME 1 #include "gemm.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void gemm(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i,j,k; for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { C[i][j] *= beta; for (k = 0; k < _PB_NK; ++k) { C[i][j] += alpha * A[i][k] * B[k][j]; } } } } void init(int ni, int nj, int nk, DATA_TYPE* alpha, DATA_TYPE* beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i, j; *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) { for (j = 0; j < nk; j++) { A[i][j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < nk; i++) { for (j = 0; j < nj; j++) { B[i][j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < ni; i++) { for (j = 0; j < nj; j++) { C[i][j] = ((DATA_TYPE) i*j) / NI; } } } void compareResults(int ni, int nj, DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(C_outputFromGpu,NI,NJ,ni,nj)) { int i, j, fail; fail = 0; // Compare CPU and GPU outputs for (i=0; i < ni; i++) { for (j=0; j < nj; j++) { if (percentDiff(C[i][j], C_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < _PB_NI) && (j < _PB_NJ)) { c[i * NJ + j] *= beta; int k; for(k=0; k < _PB_NK; k++) { c[i * NJ + j] += alpha * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(int ni, int nj, int nk, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(C_outputFromGpu,NI,NJ,ni,nj)) { DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)(ceil( ((float)NI)/ ((float)block.x) )),(size_t)(ceil( ((float)NJ)/ ((float)block.y) ))); /* Start timer. 
*/ polybench_start_instruments; gemm_kernel<<< grid, block >>>(ni, nj, nk, alpha, beta, A_gpu, B_gpu, C_gpu); cudaThreadSynchronize(); /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(C,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } int main(int argc, char *argv[]) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,NI,NK,ni,nk); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,NK,NJ,nk,nj); POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,NI,NJ,ni,nj); POLYBENCH_2D_ARRAY_DECL(C_outputFromGpu,DATA_TYPE,NI,NJ,ni,nj); init(ni, nj, nk, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C)); GPU_argv_init(); gemmCuda(ni, nj, nk, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; gemm(ni, nj, nk, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(ni, nj, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(C_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(C_outputFromGpu); return 0; } #include <polybench.c>
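The gemm pair above shows the mechanical rewrites hipify applies: the triple-chevron launch becomes hipLaunchKernelGGL, the deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(), and cudaMalloc/cudaMemcpy/cudaFree map one-to-one onto their hip* counterparts. A minimal sketch of the same mapping on a toy kernel (the kernel name scale and its arguments are illustrative, not taken from either file):

__global__ void scale(float *d_x, int n)   // illustrative kernel only
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d_x[i] *= 2.0f;
}

// CUDA launch syntax, as in the .cu sources:
//     scale<<<grid, block>>>(d_x, n);
// HIP form emitted by hipify, as in the .hip sources:
//     hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_x, n);
// Runtime calls translate by prefix, e.g. cudaMemcpy(...) -> hipMemcpy(...).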
a2b4d28254a81631742465283f8312a6fc648659.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sharpeningFilter.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *srcImage = NULL; hipMalloc(&srcImage, XSIZE*YSIZE); unsigned char *dstImage = NULL; hipMalloc(&dstImage, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; int channel = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sharpeningFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sharpeningFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sharpeningFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, srcImage,dstImage,width,height,channel); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a2b4d28254a81631742465283f8312a6fc648659.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sharpeningFilter.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *srcImage = NULL; cudaMalloc(&srcImage, XSIZE*YSIZE); unsigned char *dstImage = NULL; cudaMalloc(&dstImage, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; int channel = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sharpeningFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sharpeningFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sharpeningFilter<<<gridBlock,threadBlock>>>(srcImage,dstImage,width,height,channel); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
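Because kernel launches are asynchronous, the steady_clock interval in the benchmark above closes before the 1000 queued kernels have necessarily finished, so for fast kernels it mostly measures launch overhead. A sketch of the common variant that synchronizes before stopping the clock, reusing the variables of the main() above (illustrative only, not a change to the recorded file):

auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    sharpeningFilter<<<gridBlock, threadBlock>>>(srcImage, dstImage, width, height, channel);
}
cudaDeviceSynchronize();   // wait for every queued launch before reading the clock
auto end = steady_clock::now();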
ea7c41b088cb8af8426a7e8084a73b9405e86385.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * argmax_one_layer.cpp * * Created on: Sep 20, 2015 * Author: zhangyuting */ #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/argmax_one_layer.hpp" #include "caffe/util/math_functions.cuh" using namespace std; namespace caffe { template <typename Dtype> __global__ void ArgMaxOneLayerKernel(const int count, const Dtype* val_data, const int inner_num, const int channel_num, Dtype* midx_data ) { CUDA_KERNEL_LOOP(index, count) { int i = index/inner_num; // outer index int j = index%inner_num; // inner index const Dtype* dataq = val_data + i*channel_num*inner_num + j; int maxidx = -1; Dtype maxval = -FLT_MAX; for ( int c=0; c<channel_num; ++c ) { if ( *dataq>maxval ) { maxval = *dataq; maxidx = c; } dataq += inner_num; } midx_data[index] = static_cast<Dtype>(maxidx); } } template <typename Dtype> void ArgMaxOneLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int n = outer_num_*inner_num_; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); hipLaunchKernelGGL(( ArgMaxOneLayerKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, bottom_data, inner_num_, channel_num_, top_data ); } template <typename Dtype> void ArgMaxOneLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // No backward } INSTANTIATE_LAYER_GPU_FUNCS(ArgMaxOneLayer); } // namespace caffe
ea7c41b088cb8af8426a7e8084a73b9405e86385.cu
/* * argmax_one_layer.cpp * * Created on: Sep 20, 2015 * Author: zhangyuting */ #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/argmax_one_layer.hpp" #include "caffe/util/math_functions.cuh" using namespace std; namespace caffe { template <typename Dtype> __global__ void ArgMaxOneLayerKernel(const int count, const Dtype* val_data, const int inner_num, const int channel_num, Dtype* midx_data ) { CUDA_KERNEL_LOOP(index, count) { int i = index/inner_num; // outer index int j = index%inner_num; // inner index const Dtype* dataq = val_data + i*channel_num*inner_num + j; int maxidx = -1; Dtype maxval = -FLT_MAX; for ( int c=0; c<channel_num; ++c ) { if ( *dataq>maxval ) { maxval = *dataq; maxidx = c; } dataq += inner_num; } midx_data[index] = static_cast<Dtype>(maxidx); } } template <typename Dtype> void ArgMaxOneLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int n = outer_num_*inner_num_; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); ArgMaxOneLayerKernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, bottom_data, inner_num_, channel_num_, top_data ); } template <typename Dtype> void ArgMaxOneLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // No backward } INSTANTIATE_LAYER_GPU_FUNCS(ArgMaxOneLayer); } // namespace caffe
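The argmax kernel above decomposes the flat thread index into an outer index and an inner (spatial) index, then strides across channels. Worked out for one thread with small illustrative numbers (reading outer_num_/inner_num_ as N and H*W is an assumption about the surrounding Caffe layer, not stated in this file):

//   inner_num = 4, channel_num = 3, index = 7
//   i = 7 / 4 = 1    (outer index, e.g. the sample)
//   j = 7 % 4 = 3    (inner index, e.g. the spatial position)
//   the loop reads val_data[(1*3 + c)*4 + 3] = val_data[15], val_data[19], val_data[23]
//   for c = 0, 1, 2 and writes the winning channel index to midx_data[7]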
3ee3d50a940c4d03895c2a58e50aabbf5433db83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #include<math.h> __global__ void Matmultkernel(int* A , int* b, int* C) { int col = blockIdx.x * blockDim.x + threadIdx.x; int P = 0; for(int k= 0; k<32; k++) { int MA = A[col * 32 + k]; int Mb = b[k]; P = P + MA * Mb; } C[col] = P; } int main() { FILE * pFile; int i,j; pFile = fopen("problem1.out","w"); int d_A[16][32]; int d_b[32][1]; int d_C[16][1]; float time; hipEvent_t start1, stop1; hipEventCreate(&start1); hipEventCreate(&stop1); for(i = 0;i<16;i++) { for (j=0;j<32;j++) { d_A[i][j] = i + j; printf(" %d ",d_A[i][j]); } printf("\n"); } printf("\n"); for(i=0; i<32; i++) { d_b[i][0] = i; printf(" %d \t", d_b[i][0]); } size_t sizeA = 16 * 32 * sizeof(int); size_t sizeb = 32 * sizeof(int); size_t sizeC = 16 * sizeof(int); int* A; hipMalloc(&A,sizeA); int* b; hipMalloc(&b,sizeb); int* C; hipMalloc(&C,sizeC); //Allocate and Load A and B into device memory hipDeviceProp_t deviceProp; const int currentDevice = 0; if(hipGetDeviceProperties(&deviceProp, currentDevice) == hipSuccess) printf("Device %d: %s \n", currentDevice, deviceProp.name); hipEventRecord(start1, 0); hipMemcpy(A, d_A, sizeA, hipMemcpyHostToDevice); hipMemcpy(b, d_b, sizeb, hipMemcpyHostToDevice); // Invoke kernel hipLaunchKernelGGL(( Matmultkernel), dim3(1),dim3(16), 0, 0, A, b, C); //bring the result back from the device memory into the host hipMemcpy(d_C, C, sizeC, hipMemcpyDeviceToHost); hipEventRecord(stop1, 0); hipEventSynchronize(stop1); for(i = 0; i<16;i++) { printf("\n %d", d_C[i][0]); fprintf(pFile, "%d \n",d_C[i][0]); } fclose (pFile); hipFree(A); hipFree(b); hipFree(C); hipEventElapsedTime(&time, start1, stop1); printf("\n Inclusive time is %f", time); hipEventDestroy(start1); hipEventDestroy(stop1); return 0; }
3ee3d50a940c4d03895c2a58e50aabbf5433db83.cu
#include<stdio.h> #include<cuda.h> #include<math.h> __global__ void Matmultkernel(int* A , int* b, int* C) { int col = blockIdx.x * blockDim.x + threadIdx.x; int P = 0; for(int k= 0; k<32; k++) { int MA = A[col * 32 + k]; int Mb = b[k]; P = P + MA * Mb; } C[col] = P; } int main() { FILE * pFile; int i,j; pFile = fopen("problem1.out","w"); int d_A[16][32]; int d_b[32][1]; int d_C[16][1]; float time; cudaEvent_t start1, stop1; cudaEventCreate(&start1); cudaEventCreate(&stop1); for(i = 0;i<16;i++) { for (j=0;j<32;j++) { d_A[i][j] = i + j; printf(" %d ",d_A[i][j]); } printf("\n"); } printf("\n"); for(i=0; i<32; i++) { d_b[i][0] = i; printf(" %d \t", d_b[i][0]); } size_t sizeA = 16 * 32 * sizeof(int); size_t sizeb = 32 * sizeof(int); size_t sizeC = 16 * sizeof(int); int* A; cudaMalloc(&A,sizeA); int* b; cudaMalloc(&b,sizeb); int* C; cudaMalloc(&C,sizeC); //Allocate and Load A and B into device memory cudaDeviceProp deviceProp; const int currentDevice = 0; if(cudaGetDeviceProperties(&deviceProp, currentDevice) == cudaSuccess) printf("Device %d: %s \n", currentDevice, deviceProp.name); cudaEventRecord(start1, 0); cudaMemcpy(A, d_A, sizeA, cudaMemcpyHostToDevice); cudaMemcpy(b, d_b, sizeb, cudaMemcpyHostToDevice); // Invoke kernel Matmultkernel<<<1,16>>>(A, b, C); //bring the result back from the device memory into the host cudaMemcpy(d_C, C, sizeC, cudaMemcpyDeviceToHost); cudaEventRecord(stop1, 0); cudaEventSynchronize(stop1); for(i = 0; i<16;i++) { printf("\n %d", d_C[i][0]); fprintf(pFile, "%d \n",d_C[i][0]); } fclose (pFile); cudaFree(A); cudaFree(b); cudaFree(C); cudaEventElapsedTime(&time, start1, stop1); printf("\n Inclusive time is %f", time); cudaEventDestroy(start1); cudaEventDestroy(stop1); return 0; }
b5f794d2c979fc89506b3266fcdb983e2a97349e.hip
// !!! This is a file automatically generated by hipify!!! #include "luaT.h" #include "THH.h" #include "THLogAdd.h" /* DEBUG: WTF */ #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include "utils.h" LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L); int luaopen_libcunn(lua_State *L) { lua_newtable(L); cunn_ClassNLLCriterion_init(L); cunn_Tanh_init(L); cunn_Sigmoid_init(L); cunn_Max_init(L); cunn_Min_init(L); cunn_HardTanh_init(L); cunn_L1Cost_init(L); cunn_LogSoftMax_init(L); cunn_SoftMax_init(L); cunn_TemporalConvolution_init(L); cunn_TemporalMaxPooling_init(L); cunn_SpatialConvolutionMM_init(L); cunn_SpatialMaxPooling_init(L); cunn_SpatialAdaptiveMaxPooling_init(L); cunn_SpatialSubSampling_init(L); cunn_SpatialAveragePooling_init(L); cunn_MultiMarginCriterion_init(L); cunn_Square_init(L); cunn_Sqrt_init(L); cunn_Threshold_init(L); cunn_MSECriterion_init(L); cunn_AbsCriterion_init(L); cunn_DistKLDivCriterion_init(L); cunn_Abs_init(L); cunn_SoftPlus_init(L); cunn_SpatialUpSamplingNearest_init(L); cunn_VolumetricConvolution_init(L); cunn_LogSigmoid_init(L); cunn_PReLU_init(L); cunn_LookupTable_init(L); return 1; }
b5f794d2c979fc89506b3266fcdb983e2a97349e.cu
#include "luaT.h" #include "THC.h" #include "THLogAdd.h" /* DEBUG: WTF */ #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include "utils.h" LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L); int luaopen_libcunn(lua_State *L) { lua_newtable(L); cunn_ClassNLLCriterion_init(L); cunn_Tanh_init(L); cunn_Sigmoid_init(L); cunn_Max_init(L); cunn_Min_init(L); cunn_HardTanh_init(L); cunn_L1Cost_init(L); cunn_LogSoftMax_init(L); cunn_SoftMax_init(L); cunn_TemporalConvolution_init(L); cunn_TemporalMaxPooling_init(L); cunn_SpatialConvolutionMM_init(L); cunn_SpatialMaxPooling_init(L); cunn_SpatialAdaptiveMaxPooling_init(L); cunn_SpatialSubSampling_init(L); cunn_SpatialAveragePooling_init(L); cunn_MultiMarginCriterion_init(L); cunn_Square_init(L); cunn_Sqrt_init(L); cunn_Threshold_init(L); cunn_MSECriterion_init(L); cunn_AbsCriterion_init(L); cunn_DistKLDivCriterion_init(L); cunn_Abs_init(L); cunn_SoftPlus_init(L); cunn_SpatialUpSamplingNearest_init(L); cunn_VolumetricConvolution_init(L); cunn_LogSigmoid_init(L); cunn_PReLU_init(L); cunn_LookupTable_init(L); return 1; }
1b83fcac834dbc9eabd3458b8f2c4eee2a3953dc.hip
// !!! This is a file automatically generated by hipify!!! #include "bitmap.h" #include "morfological.h" #include "helper_timer.h" #include "helper_cuda.h" //__constant__ static int structuringElement[strucElDim*strucElDim]; int main() { checkCudaErrors(hipSetDevice(0)); StopWatchInterface *timer = NULL; bitmap* bmp; Matrix bImage; bitmap res_bmp; int8_t status; int i = 0; Matrix* resultNegation; Matrix* result; Matrix* resultMorfOp; Matrix h_structuringElement; // generacja elementu strukturalnego createHostMatrix(&h_structuringElement, strucElDim, strucElDim, strucElDim *strucElDim * sizeof(uint8_t)); createStructuringElement(h_structuringElement); showMatrix(h_structuringElement, "structuring element"); copy(h_structuringElement); // wczytanie obrazu bmp = readBitmap("fingerprint_noise_duzy.bmp"); status = convertBitmapToBinaryImage(bmp, &bImage); if (status == -1) { printf("Error while converting bitmap to binary image.\n"); WINPAUSE; exit(0); } // wstepnie przygotowanie obrazu // wykonane operacje morfologiczne w danym przypdaku maja sens jeeli pracuje si na obrazie zanegowanym resultNegation = negation(bImage); resultMorfOp = dilatation(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "dylatacja.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); resultMorfOp = erosion(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "erozja.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); float czas = 1000000000; sdkCreateTimer(&timer); sdkStartTimer(&timer); for (int i = 0; i < 100; i++) { sdkStartTimer(&timer); resultMorfOp = openingByReconstruction(*resultNegation); sdkStopTimer(&timer); free(resultMorfOp->elements); free(resultMorfOp); if (sdkGetTimerValue(&timer) < czas) czas = sdkGetTimerValue(&timer); } printf("Processing time: %f ms\n", czas); sdkDeleteTimer(&timer); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "otwarciePrzezRekonstrukcje.bmp"); free(res_bmp.image); free(result->elements); free(result); resultMorfOp = opening(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "otwarcie.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); free(resultNegation->elements); free(resultNegation); freeBitmap(bmp); free(h_structuringElement.elements); WINPAUSE; return 0; }
1b83fcac834dbc9eabd3458b8f2c4eee2a3953dc.cu
#include "bitmap.h" #include "morfological.h" #include "helper_timer.h" #include "helper_cuda.h" //__constant__ static int structuringElement[strucElDim*strucElDim]; int main() { checkCudaErrors(cudaSetDevice(0)); StopWatchInterface *timer = NULL; bitmap* bmp; Matrix bImage; bitmap res_bmp; int8_t status; int i = 0; Matrix* resultNegation; Matrix* result; Matrix* resultMorfOp; Matrix h_structuringElement; // generacja elementu strukturalnego createHostMatrix(&h_structuringElement, strucElDim, strucElDim, strucElDim *strucElDim * sizeof(uint8_t)); createStructuringElement(h_structuringElement); showMatrix(h_structuringElement, "structuring element"); copy(h_structuringElement); // wczytanie obrazu bmp = readBitmap("fingerprint_noise_duzy.bmp"); status = convertBitmapToBinaryImage(bmp, &bImage); if (status == -1) { printf("Error while converting bitmap to binary image.\n"); WINPAUSE; exit(0); } // wstepnie przygotowanie obrazu // wykonane operacje morfologiczne w danym przypdaku maja sens jeżeli pracuje się na obrazie zanegowanym resultNegation = negation(bImage); resultMorfOp = dilatation(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "dylatacja.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); resultMorfOp = erosion(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "erozja.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); float czas = 1000000000; sdkCreateTimer(&timer); sdkStartTimer(&timer); for (int i = 0; i < 100; i++) { sdkStartTimer(&timer); resultMorfOp = openingByReconstruction(*resultNegation); sdkStopTimer(&timer); free(resultMorfOp->elements); free(resultMorfOp); if (sdkGetTimerValue(&timer) < czas) czas = sdkGetTimerValue(&timer); } printf("Processing time: %f ms\n", czas); sdkDeleteTimer(&timer); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "otwarciePrzezRekonstrukcje.bmp"); free(res_bmp.image); free(result->elements); free(result); resultMorfOp = opening(*resultNegation); result = negation(*resultMorfOp); convertBinaryImageTOBitmapUsingHeader(result, bmp->hp, &res_bmp); writeBitmap(&res_bmp, "otwarcie.bmp"); free(res_bmp.image); free(resultMorfOp->elements); free(resultMorfOp); free(result->elements); free(result); free(resultNegation->elements); free(resultNegation); freeBitmap(bmp); free(h_structuringElement.elements); WINPAUSE; return 0; }
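One detail worth noting in the pair above: the best-of-100 timing loop frees resultMorfOp on every iteration, yet the code then negates and saves it, so the final negation reads freed memory. A sketch of the same minimum-time measurement that keeps the last result alive for the following negation/writeBitmap steps (names are taken from the file; sdkResetTimer comes from the same helper_timer.h header; this is a sketch, not the recorded code):

float czas = 1000000000;
resultMorfOp = NULL;                       // assumes the pointer owns nothing at this point
sdkCreateTimer(&timer);
for (int run = 0; run < 100; ++run) {
    if (resultMorfOp) { free(resultMorfOp->elements); free(resultMorfOp); }   // drop the previous run
    sdkResetTimer(&timer);
    sdkStartTimer(&timer);
    resultMorfOp = openingByReconstruction(*resultNegation);
    sdkStopTimer(&timer);
    if (sdkGetTimerValue(&timer) < czas) czas = sdkGetTimerValue(&timer);
}
// resultMorfOp from the final run is still valid here for negation() and writeBitmap().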
6a6c89c93484a4d4a0250eabfe6b8f6d1ea88e0d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph.hpp> #include "utilities/cuda_utils.cuh" #include "utilities/error.hpp" #include "utilities/graph_utils.cuh" namespace { template <typename vertex_t, typename edge_t> void degree_from_offsets(vertex_t number_of_vertices, edge_t const *offsets, edge_t *degree, hipStream_t stream) { // Computes out-degree for x = 0 and x = 2 thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(number_of_vertices), [offsets, degree] __device__(vertex_t v) { degree[v] = offsets[v + 1] - offsets[v]; }); } template <typename vertex_t, typename edge_t> void degree_from_vertex_ids(const raft::handle_t *handle, vertex_t number_of_vertices, edge_t number_of_edges, vertex_t const *indices, edge_t *degree, hipStream_t stream) { thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(number_of_edges), [indices, degree] __device__(edge_t e) { cugraph::atomicAdd(degree + indices[e], 1); }); if ((handle != nullptr) && (handle->comms_initialized())) { auto &comm = handle->get_comms(); comm.allreduce(degree, degree, number_of_vertices, raft::comms::op_t::SUM, stream); } } } // namespace namespace cugraph { template <typename VT, typename ET, typename WT> void GraphViewBase<VT, ET, WT>::get_vertex_identifiers(VT *identifiers) const { cugraph::detail::sequence<VT>(number_of_vertices, identifiers); } template <typename VT, typename ET, typename WT> void GraphCompressedSparseBaseView<VT, ET, WT>::get_source_indices(VT *src_indices) const { CUGRAPH_EXPECTS(offsets != nullptr, "No graph specified"); cugraph::detail::offsets_to_indices<VT>( offsets, GraphViewBase<VT, ET, WT>::number_of_vertices, src_indices); } template <typename VT, typename ET, typename WT> void GraphCOOView<VT, ET, WT>::degree(ET *degree, DegreeDirection direction) const { // // NOTE: We assume offsets/indices are a CSR. If a CSC is passed // in then x should be modified to reflect the expected direction. // (e.g. 
if you have a CSC and you want in-degree (x=1) then pass // the offsets/indices and request an out-degree (x=2)) // hipStream_t stream{nullptr}; if (direction != DegreeDirection::IN) { if ((GraphViewBase<VT, ET, WT>::handle != nullptr) && (GraphViewBase<VT, ET, WT>::handle ->comms_initialized())) // FIXME retrieve global source // indexing for the allreduce work { CUGRAPH_FAIL("MG degree not implemented for OUT degree"); } degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, src_indices, degree, stream); } if (direction != DegreeDirection::OUT) { degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, dst_indices, degree, stream); } } template <typename VT, typename ET, typename WT> void GraphCompressedSparseBaseView<VT, ET, WT>::degree(ET *degree, DegreeDirection direction) const { // // NOTE: We assume offsets/indices are a CSR. If a CSC is passed // in then x should be modified to reflect the expected direction. // (e.g. if you have a CSC and you want in-degree (x=1) then pass // the offsets/indices and request an out-degree (x=2)) // hipStream_t stream{nullptr}; if (direction != DegreeDirection::IN) { if ((GraphViewBase<VT, ET, WT>::handle != nullptr) && (GraphViewBase<VT, ET, WT>::handle->comms_initialized())) { CUGRAPH_FAIL("MG degree not implemented for OUT degree"); // FIXME retrieve global // source indexing for // the allreduce to work } degree_from_offsets(GraphViewBase<VT, ET, WT>::number_of_vertices, offsets, degree, stream); } if (direction != DegreeDirection::OUT) { degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, indices, degree, stream); } } // explicit instantiation template class GraphViewBase<int32_t, int32_t, float>; template class GraphViewBase<int32_t, int32_t, double>; template class GraphCOOView<int32_t, int32_t, float>; template class GraphCOOView<int32_t, int32_t, double>; template class GraphCompressedSparseBaseView<int32_t, int32_t, float>; template class GraphCompressedSparseBaseView<int32_t, int32_t, double>; } // namespace cugraph
6a6c89c93484a4d4a0250eabfe6b8f6d1ea88e0d.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph.hpp> #include "utilities/cuda_utils.cuh" #include "utilities/error.hpp" #include "utilities/graph_utils.cuh" namespace { template <typename vertex_t, typename edge_t> void degree_from_offsets(vertex_t number_of_vertices, edge_t const *offsets, edge_t *degree, cudaStream_t stream) { // Computes out-degree for x = 0 and x = 2 thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(number_of_vertices), [offsets, degree] __device__(vertex_t v) { degree[v] = offsets[v + 1] - offsets[v]; }); } template <typename vertex_t, typename edge_t> void degree_from_vertex_ids(const raft::handle_t *handle, vertex_t number_of_vertices, edge_t number_of_edges, vertex_t const *indices, edge_t *degree, cudaStream_t stream) { thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(number_of_edges), [indices, degree] __device__(edge_t e) { cugraph::atomicAdd(degree + indices[e], 1); }); if ((handle != nullptr) && (handle->comms_initialized())) { auto &comm = handle->get_comms(); comm.allreduce(degree, degree, number_of_vertices, raft::comms::op_t::SUM, stream); } } } // namespace namespace cugraph { template <typename VT, typename ET, typename WT> void GraphViewBase<VT, ET, WT>::get_vertex_identifiers(VT *identifiers) const { cugraph::detail::sequence<VT>(number_of_vertices, identifiers); } template <typename VT, typename ET, typename WT> void GraphCompressedSparseBaseView<VT, ET, WT>::get_source_indices(VT *src_indices) const { CUGRAPH_EXPECTS(offsets != nullptr, "No graph specified"); cugraph::detail::offsets_to_indices<VT>( offsets, GraphViewBase<VT, ET, WT>::number_of_vertices, src_indices); } template <typename VT, typename ET, typename WT> void GraphCOOView<VT, ET, WT>::degree(ET *degree, DegreeDirection direction) const { // // NOTE: We assume offsets/indices are a CSR. If a CSC is passed // in then x should be modified to reflect the expected direction. // (e.g. 
if you have a CSC and you want in-degree (x=1) then pass // the offsets/indices and request an out-degree (x=2)) // cudaStream_t stream{nullptr}; if (direction != DegreeDirection::IN) { if ((GraphViewBase<VT, ET, WT>::handle != nullptr) && (GraphViewBase<VT, ET, WT>::handle ->comms_initialized())) // FIXME retrieve global source // indexing for the allreduce work { CUGRAPH_FAIL("MG degree not implemented for OUT degree"); } degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, src_indices, degree, stream); } if (direction != DegreeDirection::OUT) { degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, dst_indices, degree, stream); } } template <typename VT, typename ET, typename WT> void GraphCompressedSparseBaseView<VT, ET, WT>::degree(ET *degree, DegreeDirection direction) const { // // NOTE: We assume offsets/indices are a CSR. If a CSC is passed // in then x should be modified to reflect the expected direction. // (e.g. if you have a CSC and you want in-degree (x=1) then pass // the offsets/indices and request an out-degree (x=2)) // cudaStream_t stream{nullptr}; if (direction != DegreeDirection::IN) { if ((GraphViewBase<VT, ET, WT>::handle != nullptr) && (GraphViewBase<VT, ET, WT>::handle->comms_initialized())) { CUGRAPH_FAIL("MG degree not implemented for OUT degree"); // FIXME retrieve global // source indexing for // the allreduce to work } degree_from_offsets(GraphViewBase<VT, ET, WT>::number_of_vertices, offsets, degree, stream); } if (direction != DegreeDirection::OUT) { degree_from_vertex_ids(GraphViewBase<VT, ET, WT>::handle, GraphViewBase<VT, ET, WT>::number_of_vertices, GraphViewBase<VT, ET, WT>::number_of_edges, indices, degree, stream); } } // explicit instantiation template class GraphViewBase<int32_t, int32_t, float>; template class GraphViewBase<int32_t, int32_t, double>; template class GraphCOOView<int32_t, int32_t, float>; template class GraphCOOView<int32_t, int32_t, double>; template class GraphCompressedSparseBaseView<int32_t, int32_t, float>; template class GraphCompressedSparseBaseView<int32_t, int32_t, double>; } // namespace cugraph
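A small worked example for the two degree paths above (illustrative numbers, not from the file): for a three-vertex graph stored as CSR with offsets = {0, 2, 3, 3} and indices = {1, 2, 0}, degree_from_offsets returns the out-degrees {2, 1, 0}, i.e. offsets[v+1] - offsets[v], while degree_from_vertex_ids over the same indices array counts how often each vertex appears as a destination and returns the in-degrees {1, 1, 1}. The NOTE in the code is exactly this symmetry: passing a CSC instead of a CSR swaps which direction each routine computes.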
9c516b5adcb6a70fd15fae87e83086b7c729e27a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// vecMaxKernel00.cu : code computes partiol answer per each thread. /// By Waruna Ranasinghe /// Created: 15 Aug 2017 /// Last Modified: /// The code computes partial answer for reduction over max per a thread. /// Each thread computes max of a consective chunck of data of size C. /// The memory loads are not coalesced. /* * A - input vector of floats of size G*B*C * reductions - output of partial answers compted by each thread * C - chunck size - number of elements processed by ech thread */ __global__ void reduce(const float* A, float* reductions, int C) { int tid = threadIdx.x; //Thread index within a thread block int blockid = blockIdx.x; //Block index within the grid int B = blockDim.x; //numer of threads per block //The index of the array corresponds to the start of a thread block int start_of_the_block = blockid*B*C; int end_of_the_block = start_of_the_block + B*C; reductions[blockid*B + tid] = 0.0f; for (int i=start_of_the_block + tid; i < end_of_the_block; i += B) { reductions[blockid*B+tid] = max(reductions[blockid*B+tid],A[i]); } }
9c516b5adcb6a70fd15fae87e83086b7c729e27a.cu
/// /// vecMaxKernel00.cu : code computes partiol answer per each thread. /// By Waruna Ranasinghe /// Created: 15 Aug 2017 /// Last Modified: /// The code computes partial answer for reduction over max per a thread. /// Each thread computes max of a consective chunck of data of size C. /// The memory loads are not coalesced. /* * A - input vector of floats of size G*B*C * reductions - output of partial answers compted by each thread * C - chunck size - number of elements processed by ech thread */ __global__ void reduce(const float* A, float* reductions, int C) { int tid = threadIdx.x; //Thread index within a thread block int blockid = blockIdx.x; //Block index within the grid int B = blockDim.x; //numer of threads per block //The index of the array corresponds to the start of a thread block int start_of_the_block = blockid*B*C; int end_of_the_block = start_of_the_block + B*C; reductions[blockid*B + tid] = 0.0f; for (int i=start_of_the_block + tid; i < end_of_the_block; i += B) { reductions[blockid*B+tid] = max(reductions[blockid*B+tid],A[i]); } }
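The reduction above re-reads and re-writes its partial result in global memory (reductions[blockid*B + tid]) on every loop iteration. A minimal sketch of the same per-thread partial max that accumulates in a register and writes global memory once (sketch only; it keeps the original's 0.0f initialisation and therefore its implicit assumption of non-negative inputs):

__global__ void reduce_reg(const float* A, float* reductions, int C)
{
    int tid = threadIdx.x;          // thread index within the block
    int blockid = blockIdx.x;       // block index within the grid
    int B = blockDim.x;             // threads per block
    int start_of_the_block = blockid * B * C;
    int end_of_the_block = start_of_the_block + B * C;
    float m = 0.0f;                 // register accumulator
    for (int i = start_of_the_block + tid; i < end_of_the_block; i += B) {
        m = max(m, A[i]);
    }
    reductions[blockid * B + tid] = m;   // single global write per thread
}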
febbb1e9166c20535c8968a8935467f7532c1268.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <mtx.h> #include <ctime> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> const size_t BSZ = 128; const size_t TSZ = 16; const size_t SZ = BSZ * TSZ; __global__ void random_init(unsigned seed, hiprandStatePhilox4_32_10_t* states){ unsigned id = blockDim.x * blockIdx.x + threadIdx.x; hiprand_init(seed, id, 0, &states[id]); } __global__ void random_normal(double* dst, hiprandStatePhilox4_32_10_t* states){ unsigned id = blockDim.x * blockIdx.x + threadIdx.x; unsigned idx = id * 2; double2* dst2 = reinterpret_cast<double2*>(&dst[idx]); *dst2 = hiprand_uniform2_double(&states[id]); } int main(){ Mtx<double> a(false, SZ, SZ), b(false, SZ, SZ); Mtx<double> da(true, SZ, SZ), db(true, SZ, SZ); hiprandStatePhilox4_32_10_t* states; gpu_errchk(hipMalloc(&states, sizeof(hiprandStatePhilox4_32_10_t) * SZ * SZ / 2)); dim3 blocks(BSZ * BSZ / 2); dim3 tpb(TSZ * TSZ); hipLaunchKernelGGL(( random_init), dim3(blocks), dim3(tpb) , 0, 0, time(NULL), states); hipLaunchKernelGGL(( random_normal), dim3(blocks), dim3(tpb) , 0, 0, da.data, states); hipLaunchKernelGGL(( random_normal), dim3(blocks), dim3(tpb) , 0, 0, db.data, states); hipMemcpy(a.data, da.data, sizeof(double) * SZ * SZ, hipMemcpyDeviceToHost); hipMemcpy(b.data, db.data, sizeof(double) * SZ * SZ, hipMemcpyDeviceToHost); for (size_t i = 0; i <SZ; ++i) for (size_t j = 0; j < SZ; ++j) if (a.data[i * SZ + j] == b.data[i * SZ + j]){ cout << "Equal random value at same position " << (i * SZ + j) << endl; } cout << a << endl; cout << b << endl; }
febbb1e9166c20535c8968a8935467f7532c1268.cu
#include <mtx.h> #include <ctime> #include <curand.h> #include <curand_kernel.h> const size_t BSZ = 128; const size_t TSZ = 16; const size_t SZ = BSZ * TSZ; __global__ void random_init(unsigned seed, curandStatePhilox4_32_10_t* states){ unsigned id = blockDim.x * blockIdx.x + threadIdx.x; curand_init(seed, id, 0, &states[id]); } __global__ void random_normal(double* dst, curandStatePhilox4_32_10_t* states){ unsigned id = blockDim.x * blockIdx.x + threadIdx.x; unsigned idx = id * 2; double2* dst2 = reinterpret_cast<double2*>(&dst[idx]); *dst2 = curand_uniform2_double(&states[id]); } int main(){ Mtx<double> a(false, SZ, SZ), b(false, SZ, SZ); Mtx<double> da(true, SZ, SZ), db(true, SZ, SZ); curandStatePhilox4_32_10_t* states; gpu_errchk(cudaMalloc(&states, sizeof(curandStatePhilox4_32_10_t) * SZ * SZ / 2)); dim3 blocks(BSZ * BSZ / 2); dim3 tpb(TSZ * TSZ); random_init<<< blocks, tpb >>>(time(NULL), states); random_normal<<< blocks, tpb >>>(da.data, states); random_normal<<< blocks, tpb >>>(db.data, states); cudaMemcpy(a.data, da.data, sizeof(double) * SZ * SZ, cudaMemcpyDeviceToHost); cudaMemcpy(b.data, db.data, sizeof(double) * SZ * SZ, cudaMemcpyDeviceToHost); for (size_t i = 0; i <SZ; ++i) for (size_t j = 0; j < SZ; ++j) if (a.data[i * SZ + j] == b.data[i * SZ + j]){ cout << "Equal random value at same position " << (i * SZ + j) << endl; } cout << a << endl; cout << b << endl; }
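In the Philox benchmark above, each thread calls curand_uniform2_double (hiprand_uniform2_double on the HIP side) once and stores the resulting double2, i.e. two matrix entries per thread. That is why only SZ*SZ/2 generator states are allocated and SZ*SZ/2 threads are launched, and why the destination is indexed at idx = id*2: assuming the Mtx class (not shown here) obtains its device buffer from cudaMalloc, the base pointer is at least 256-byte aligned, so a double2 store at an even double offset stays 16-byte aligned.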
9d90b78f3cb6d34a3a5596369f77aed3ce1c249e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define COMPLEX /******************************************************************************/ __global__ void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaDoubleComplex scale; __shared__ double xnorm; magmaDoubleComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; #ifdef REAL double alpha = *dx0; double alphai = MAGMA_Z_ZERO; if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 ) #else magmaDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 ) #endif { *dtau = MAGMA_Z_ZERO; *dA = *dx0; } else { #ifdef REAL // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = (beta - alpha) / beta; //*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha *dA = beta; } scale = 1. / (alpha - beta); #else // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_Z_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha *dA = MAGMA_Z_MAKE(beta, 0.); } alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); if (j < it) { *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.); } } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). 
*******************************************************************************/ extern "C" void magma_zlarfgx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magma_queue_t queue ) { dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zlarfgx_gpu_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dx0, dx, dtau, dxnorm, dA, iter); } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgtx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr T, magma_int_t ldt, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* Generate the elementary reflector H(iter) */ magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue); if (iter == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue ); magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue ); } else { /* Compute the iter-th column of T */ hipLaunchKernelGGL(( magma_zgemv_kernel3) , dim3(iter), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , n, V, ldv, dx0, dwork, dtau ); hipLaunchKernelGGL(( magma_ztrmv_kernel2) , dim3(iter), dim3(iter), 0, queue->cuda_stream() , T, ldt, dwork, T+iter*ldt, dtau ); } }
9d90b78f3cb6d34a3a5596369f77aed3ce1c249e.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define COMPLEX /******************************************************************************/ __global__ void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaDoubleComplex scale; __shared__ double xnorm; magmaDoubleComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; #ifdef REAL double alpha = *dx0; double alphai = MAGMA_Z_ZERO; if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 ) #else magmaDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 ) #endif { *dtau = MAGMA_Z_ZERO; *dA = *dx0; } else { #ifdef REAL // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = (beta - alpha) / beta; //*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha *dA = beta; } scale = 1. / (alpha - beta); #else // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_Z_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha *dA = MAGMA_Z_MAKE(beta, 0.); } alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); if (j < it) { *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.); } } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magma_queue_t queue ) { dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) ); dim3 threads( BLOCK_SIZE ); magma_zlarfgx_gpu_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( n, dx0, dx, dtau, dxnorm, dA, iter); } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. 
Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgtx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr T, magma_int_t ldt, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* Generate the elementary reflector H(iter) */ magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue); if (iter == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue ); magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue ); } else { /* Compute the iter-th column of T */ magma_zgemv_kernel3 <<< iter, BLOCK_SIZE, 0, queue->cuda_stream() >>> ( n, V, ldv, dx0, dwork, dtau ); magma_ztrmv_kernel2 <<< iter, iter, 0, queue->cuda_stream() >>> ( T, ldt, dwork, T+iter*ldt, dtau ); } }
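For reference, the construction both docstrings describe is the standard LAPACK larfg one; written out with beta real and alpha the entry in dx0:

\[
H = I - \tau\, v v^{H},\qquad
H \begin{bmatrix} \alpha \\ x \end{bmatrix} = \begin{bmatrix} \beta \\ 0 \end{bmatrix},\qquad
\beta = -\operatorname{sign}(\operatorname{Re}\alpha)\,\left\lVert \begin{bmatrix} \alpha \\ x \end{bmatrix} \right\rVert_2,\qquad
\tau = \frac{\beta-\alpha}{\beta},\qquad
v = \begin{bmatrix} 1 \\ x/(\alpha-\beta) \end{bmatrix},
\]

which matches the kernel above: beta = -copysign(xnorm, alphar), dtau = (beta - alpha)/beta (split into real and imaginary parts), and dx scaled by 1/(alpha - beta).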
5a543af61bcba1a43ade0fcccbadca741502c839.hip
// !!! This is a file automatically generated by hipify!!! #include "allocators.hpp" #include <boost/format.hpp> #include <boost/thread/recursive_mutex.hpp> #include <hip/hip_runtime_api.h> #include <sstream> #include <stdexcept> #include <limits> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <vector> #include <cuv/tools/cuv_general.hpp> /*#include <iostream>*/ /*#undef CUV_LOG_DEBUG*/ /*#define CUV_LOG_DEBUG(X) std::cout << X << std::endl;*/ namespace cuv { void default_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space) { assert(*ptr == 0); *ptr = malloc(memsize * valueSize); assert(*ptr); } void default_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space) { assert(*ptr == 0); cuvSafeCall(hipMalloc(ptr, memsize * valueSize)); assert(*ptr); } void default_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { pitch = width * valueSize; alloc(ptr, height * width, valueSize, m); assert(*ptr); } void default_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space) { cuvSafeCall(hipMallocPitch(ptr, &pitch, valueSize * width, height)); assert(*ptr); } void default_allocator::dealloc(void** ptr, host_memory_space) { assert(*ptr != 0); free(*ptr); *ptr = 0; } void default_allocator::dealloc(void** ptr, dev_memory_space) { assert(*ptr != 0); cuvSafeCall(hipFree(*ptr)); *ptr = 0; } void cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space) { assert(*ptr == 0); cuvSafeCall(hipHostMalloc(ptr, memsize * valueSize)); assert(*ptr != 0); } void cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { default_allocator::alloc(ptr, memsize, valueSize, m); } void cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { pitch = width * valueSize; alloc(ptr, height * width, valueSize, m); } void cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space) { cuvSafeCall(hipMallocPitch(ptr, &pitch, valueSize * width, height)); } void cuda_allocator::dealloc(void** ptr, host_memory_space) { assert(*ptr != 0); cuvSafeCall(hipHostFree(*ptr)); *ptr = 0; } void cuda_allocator::dealloc(void** ptr, dev_memory_space m) { default_allocator::dealloc(ptr, m); } template<class memory_space> void pooled_cuda_allocator::collect_garbage(memory_space m) { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::vector<void*> to_delete; std::map<void*, bool>::iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (it->second) { to_delete.push_back(it->first); } } for (size_t i = 0; i < to_delete.size(); i++) { void* ptr = to_delete[i]; pool.erase(ptr); pool_sizes.erase(ptr); cuda_alloc.dealloc(&ptr, m); } assert(pool_free_count(m) == 0); CUV_LOG_DEBUG("garbage collection in memory pool " << m_name << " (" << memtype(m) << "): removed " << to_delete.size() << " elements"); } template<> boost::recursive_mutex& pooled_cuda_allocator::get_pool_mutex(dev_memory_space) const { // locking/unlocking a mutex does not violate constness of this object // unfortunately, the design of the scoped_lock and mutex class requires this hack of a const_cast return *(const_cast<boost::recursive_mutex*>(&m_dev_pool_mutex)); } template<> 
boost::recursive_mutex& pooled_cuda_allocator::get_pool_mutex(host_memory_space) const { // locking/unlocking a mutex does not violate constness of this object // unfortunately, the design of the scoped_lock and mutex class requires this hack of a const_cast return *(const_cast<boost::recursive_mutex*>(&m_host_pool_mutex)); } template<> std::map<void*, bool>& pooled_cuda_allocator::get_pool(dev_memory_space) { return m_dev_pool; } template<> std::map<void*, bool>& pooled_cuda_allocator::get_pool(host_memory_space) { return m_host_pool; } template<> const std::map<void*, bool>& pooled_cuda_allocator::get_pool(dev_memory_space) const { return m_dev_pool; } template<> const std::map<void*, bool>& pooled_cuda_allocator::get_pool(host_memory_space) const { return m_host_pool; } template<> std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(dev_memory_space) { return m_dev_pool_sizes; } template<> std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(host_memory_space) { return m_host_pool_sizes; } template<> const std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(dev_memory_space) const { return m_dev_pool_sizes; } template<> const std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(host_memory_space) const { return m_host_pool_sizes; } template<class memory_space> void pooled_cuda_allocator::delete_pool(memory_space m) { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); #ifndef NDEBUG size_t free_count = pool_free_count(m); size_t count = pool_count(m); if (free_count != count) { throw std::runtime_error( (boost::format("detected potential memory leak in memory pool '%s' (%s): free: %d, count: %d") % m_name % memtype(m) % free_count % count).str()); } #endif std::map<void*, bool>::iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (!it->second) { throw std::runtime_error( "misuse of allocator. memory was not deallocated before allocator is destroyed. 
this is a programming failure."); } void* ptr = it->first; cuda_alloc.dealloc(&ptr, m); } pool.clear(); pool_sizes.clear(); CUV_LOG_DEBUG("deleted memory pool " << m_name << " (" << memtype(m) << ")"); } pooled_cuda_allocator::pooled_cuda_allocator(const std::string& _name) : m_name(_name), m_dev_pool_mutex(), m_host_pool_mutex(), m_dev_pool(), m_dev_pool_sizes(), m_host_pool(), m_host_pool_sizes() { if (m_name.empty()) { std::ostringstream o; o << this; m_name = o.str(); } } pooled_cuda_allocator::~pooled_cuda_allocator() { delete_pool(dev_memory_space()); delete_pool(host_memory_space()); } template<class memory_space> size_t pooled_cuda_allocator::pool_size(memory_space m) const { size_t sum = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); const std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::map<void*, size_t>::const_iterator it; for (it = pool_sizes.begin(); it != pool_sizes.end(); it++) { sum += it->second; } return sum; } template<class memory_space> size_t pooled_cuda_allocator::pool_count(memory_space m) const { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); return get_pool_sizes(m).size(); } template<class memory_space> size_t pooled_cuda_allocator::pool_free_count(memory_space m) const { size_t free = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); const std::map<void*, bool>& pool = get_pool(m); std::map<void*, bool>::const_iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (it->second) { free++; } } return free; } size_t pooled_cuda_allocator::pool_free_count() const { return pool_free_count(dev_memory_space()) + pool_free_count(host_memory_space()); } size_t pooled_cuda_allocator::pool_size() const { return pool_size(dev_memory_space()) + pool_size(host_memory_space()); } size_t pooled_cuda_allocator::pool_count() const { return pool_count(dev_memory_space()) + pool_count(host_memory_space()); } void pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { if (memsize * valueSize < MIN_SIZE_DEV) { default_alloc.alloc(ptr, memsize, valueSize, m); } else { alloc_pooled(ptr, memsize, valueSize, m); } } template<class memory_space> void pooled_cuda_allocator::alloc_pooled(void** ptr, size_t memsize, size_t valueSize, memory_space m) { assert(memsize > 0); // try to find memory in the pool that is available and large enough but not too large size_t bestSize = 0; void* bestPtr = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::map<void*, bool>::iterator it; { for (it = pool.begin(); it != pool.end(); it++) { // available? if (!it->second) { continue; } size_t size = pool_sizes[it->first]; // large enough? if (size > memsize * valueSize) { if (bestPtr == 0 || size < bestSize) { bestPtr = it->first; bestSize = size; } } // cant get better else if (size == memsize * valueSize) { bestPtr = it->first; bestSize = size; break; } } if (bestPtr) { // we take it assert(pool[bestPtr]); pool[bestPtr] = false; *ptr = bestPtr; CUV_LOG_DEBUG("reusing " << memsize * valueSize << "/" << pool_sizes[bestPtr] << " bytes in pool " << m_name << " (" << memtype(m) << ")"); return; } } CUV_LOG_DEBUG("allocating " << memsize << "x" << valueSize << " bytes in pool " << m_name << " (" << memtype(m) << ")"); // nothing found? 
// allocate new memory cuda_alloc.alloc(ptr, memsize, valueSize, m); pool[*ptr] = false; pool_sizes[*ptr] = memsize * valueSize; CUV_LOG_DEBUG("allocated in pool " << m_name << " (" << memtype(m) << "). total bytes: " << pool_size(m) << ". count: " << pool_count(m) << ". free: " << pool_free_count(m)); assert(!pool.empty()); } void pooled_cuda_allocator::dealloc(void** ptr, dev_memory_space m) { do_dealloc(ptr, m); } void pooled_cuda_allocator::dealloc(void** ptr, host_memory_space m) { do_dealloc(ptr, m); } template<class memory_space> void pooled_cuda_allocator::do_dealloc(void** ptr, memory_space m) { assert(*ptr); boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, bool>::iterator it = pool.find(*ptr); if (it == pool.end()) { default_alloc.dealloc(ptr, m); return; } // mark the memory as available assert(it->second == false); it->second = true; #ifndef NDEBUG std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); assert(pool_sizes[*ptr] > 0); CUV_LOG_DEBUG( "released " << pool_sizes[*ptr] << " bytes in pool " << m_name << " (" << memtype(m) << "). total bytes: " << pool_size(m) << ". count: " << pool_count(m) <<", free: " << pool_free_count(m)); #endif *ptr = 0; } void pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space m) { if (memsize * valueSize < MIN_SIZE_HOST) { default_alloc.alloc(ptr, memsize, valueSize, m); } else { alloc_pooled(ptr, memsize, valueSize, m); } } void pooled_cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { // not yet pooled default_alloc.alloc2d(ptr, pitch, height, width, valueSize, m); } void pooled_cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space m) { // not yet pooled default_alloc.alloc2d(ptr, pitch, height, width, valueSize, m); } void nan_pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { pooled_cuda_allocator::alloc(ptr, memsize, valueSize, m); if(valueSize == sizeof(float)){ // set everything to NaN thrust::device_ptr<float> begin((float*)*ptr); thrust::device_ptr<float> end(((float*)*ptr) + memsize); thrust::fill(begin, end, std::numeric_limits<float>::quiet_NaN()); } } void nan_pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space m) { pooled_cuda_allocator::alloc(ptr, memsize, valueSize, m); if(valueSize == sizeof(float)){ // set everything to NaN thrust::fill((float*)*ptr, ((float*)*ptr) + memsize, std::numeric_limits<float>::quiet_NaN()); } } } #define CUV_POOLED_CUDA_ALLOCATOR_INST(X) \ template size_t cuv::pooled_cuda_allocator::pool_count(X) const; \ template size_t cuv::pooled_cuda_allocator::pool_free_count(X) const; \ template size_t cuv::pooled_cuda_allocator::pool_size(X) const; CUV_POOLED_CUDA_ALLOCATOR_INST(cuv::dev_memory_space); CUV_POOLED_CUDA_ALLOCATOR_INST(cuv::host_memory_space);
5a543af61bcba1a43ade0fcccbadca741502c839.cu
#include "allocators.hpp" #include <boost/format.hpp> #include <boost/thread/recursive_mutex.hpp> #include <cuda_runtime_api.h> #include <sstream> #include <stdexcept> #include <limits> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <vector> #include <cuv/tools/cuv_general.hpp> /*#include <iostream>*/ /*#undef CUV_LOG_DEBUG*/ /*#define CUV_LOG_DEBUG(X) std::cout << X << std::endl;*/ namespace cuv { void default_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space) { assert(*ptr == 0); *ptr = malloc(memsize * valueSize); assert(*ptr); } void default_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space) { assert(*ptr == 0); cuvSafeCall(cudaMalloc(ptr, memsize * valueSize)); assert(*ptr); } void default_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { pitch = width * valueSize; alloc(ptr, height * width, valueSize, m); assert(*ptr); } void default_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space) { cuvSafeCall(cudaMallocPitch(ptr, &pitch, valueSize * width, height)); assert(*ptr); } void default_allocator::dealloc(void** ptr, host_memory_space) { assert(*ptr != 0); free(*ptr); *ptr = 0; } void default_allocator::dealloc(void** ptr, dev_memory_space) { assert(*ptr != 0); cuvSafeCall(cudaFree(*ptr)); *ptr = 0; } void cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space) { assert(*ptr == 0); cuvSafeCall(cudaMallocHost(ptr, memsize * valueSize)); assert(*ptr != 0); } void cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { default_allocator::alloc(ptr, memsize, valueSize, m); } void cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { pitch = width * valueSize; alloc(ptr, height * width, valueSize, m); } void cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space) { cuvSafeCall(cudaMallocPitch(ptr, &pitch, valueSize * width, height)); } void cuda_allocator::dealloc(void** ptr, host_memory_space) { assert(*ptr != 0); cuvSafeCall(cudaFreeHost(*ptr)); *ptr = 0; } void cuda_allocator::dealloc(void** ptr, dev_memory_space m) { default_allocator::dealloc(ptr, m); } template<class memory_space> void pooled_cuda_allocator::collect_garbage(memory_space m) { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::vector<void*> to_delete; std::map<void*, bool>::iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (it->second) { to_delete.push_back(it->first); } } for (size_t i = 0; i < to_delete.size(); i++) { void* ptr = to_delete[i]; pool.erase(ptr); pool_sizes.erase(ptr); cuda_alloc.dealloc(&ptr, m); } assert(pool_free_count(m) == 0); CUV_LOG_DEBUG("garbage collection in memory pool " << m_name << " (" << memtype(m) << "): removed " << to_delete.size() << " elements"); } template<> boost::recursive_mutex& pooled_cuda_allocator::get_pool_mutex(dev_memory_space) const { // locking/unlocking a mutex does not violate constness of this object // unfortunately, the design of the scoped_lock and mutex class requires this hack of a const_cast return *(const_cast<boost::recursive_mutex*>(&m_dev_pool_mutex)); } template<> boost::recursive_mutex& 
pooled_cuda_allocator::get_pool_mutex(host_memory_space) const { // locking/unlocking a mutex does not violate constness of this object // unfortunately, the design of the scoped_lock and mutex class requires this hack of a const_cast return *(const_cast<boost::recursive_mutex*>(&m_host_pool_mutex)); } template<> std::map<void*, bool>& pooled_cuda_allocator::get_pool(dev_memory_space) { return m_dev_pool; } template<> std::map<void*, bool>& pooled_cuda_allocator::get_pool(host_memory_space) { return m_host_pool; } template<> const std::map<void*, bool>& pooled_cuda_allocator::get_pool(dev_memory_space) const { return m_dev_pool; } template<> const std::map<void*, bool>& pooled_cuda_allocator::get_pool(host_memory_space) const { return m_host_pool; } template<> std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(dev_memory_space) { return m_dev_pool_sizes; } template<> std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(host_memory_space) { return m_host_pool_sizes; } template<> const std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(dev_memory_space) const { return m_dev_pool_sizes; } template<> const std::map<void*, size_t>& pooled_cuda_allocator::get_pool_sizes(host_memory_space) const { return m_host_pool_sizes; } template<class memory_space> void pooled_cuda_allocator::delete_pool(memory_space m) { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); #ifndef NDEBUG size_t free_count = pool_free_count(m); size_t count = pool_count(m); if (free_count != count) { throw std::runtime_error( (boost::format("detected potential memory leak in memory pool '%s' (%s): free: %d, count: %d") % m_name % memtype(m) % free_count % count).str()); } #endif std::map<void*, bool>::iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (!it->second) { throw std::runtime_error( "misuse of allocator. memory was not deallocated before allocator is destroyed. 
this is a programming failure."); } void* ptr = it->first; cuda_alloc.dealloc(&ptr, m); } pool.clear(); pool_sizes.clear(); CUV_LOG_DEBUG("deleted memory pool " << m_name << " (" << memtype(m) << ")"); } pooled_cuda_allocator::pooled_cuda_allocator(const std::string& _name) : m_name(_name), m_dev_pool_mutex(), m_host_pool_mutex(), m_dev_pool(), m_dev_pool_sizes(), m_host_pool(), m_host_pool_sizes() { if (m_name.empty()) { std::ostringstream o; o << this; m_name = o.str(); } } pooled_cuda_allocator::~pooled_cuda_allocator() { delete_pool(dev_memory_space()); delete_pool(host_memory_space()); } template<class memory_space> size_t pooled_cuda_allocator::pool_size(memory_space m) const { size_t sum = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); const std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::map<void*, size_t>::const_iterator it; for (it = pool_sizes.begin(); it != pool_sizes.end(); it++) { sum += it->second; } return sum; } template<class memory_space> size_t pooled_cuda_allocator::pool_count(memory_space m) const { boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); return get_pool_sizes(m).size(); } template<class memory_space> size_t pooled_cuda_allocator::pool_free_count(memory_space m) const { size_t free = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); const std::map<void*, bool>& pool = get_pool(m); std::map<void*, bool>::const_iterator it; for (it = pool.begin(); it != pool.end(); it++) { if (it->second) { free++; } } return free; } size_t pooled_cuda_allocator::pool_free_count() const { return pool_free_count(dev_memory_space()) + pool_free_count(host_memory_space()); } size_t pooled_cuda_allocator::pool_size() const { return pool_size(dev_memory_space()) + pool_size(host_memory_space()); } size_t pooled_cuda_allocator::pool_count() const { return pool_count(dev_memory_space()) + pool_count(host_memory_space()); } void pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { if (memsize * valueSize < MIN_SIZE_DEV) { default_alloc.alloc(ptr, memsize, valueSize, m); } else { alloc_pooled(ptr, memsize, valueSize, m); } } template<class memory_space> void pooled_cuda_allocator::alloc_pooled(void** ptr, size_t memsize, size_t valueSize, memory_space m) { assert(memsize > 0); // try to find memory in the pool that is available and large enough but not too large size_t bestSize = 0; void* bestPtr = 0; boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); std::map<void*, bool>::iterator it; { for (it = pool.begin(); it != pool.end(); it++) { // available? if (!it->second) { continue; } size_t size = pool_sizes[it->first]; // large enough? if (size > memsize * valueSize) { if (bestPtr == 0 || size < bestSize) { bestPtr = it->first; bestSize = size; } } // can’t get better else if (size == memsize * valueSize) { bestPtr = it->first; bestSize = size; break; } } if (bestPtr) { // we take it assert(pool[bestPtr]); pool[bestPtr] = false; *ptr = bestPtr; CUV_LOG_DEBUG("reusing " << memsize * valueSize << "/" << pool_sizes[bestPtr] << " bytes in pool " << m_name << " (" << memtype(m) << ")"); return; } } CUV_LOG_DEBUG("allocating " << memsize << "x" << valueSize << " bytes in pool " << m_name << " (" << memtype(m) << ")"); // nothing found? 
// allocate new memory cuda_alloc.alloc(ptr, memsize, valueSize, m); pool[*ptr] = false; pool_sizes[*ptr] = memsize * valueSize; CUV_LOG_DEBUG("allocated in pool " << m_name << " (" << memtype(m) << "). total bytes: " << pool_size(m) << ". count: " << pool_count(m) << ". free: " << pool_free_count(m)); assert(!pool.empty()); } void pooled_cuda_allocator::dealloc(void** ptr, dev_memory_space m) { do_dealloc(ptr, m); } void pooled_cuda_allocator::dealloc(void** ptr, host_memory_space m) { do_dealloc(ptr, m); } template<class memory_space> void pooled_cuda_allocator::do_dealloc(void** ptr, memory_space m) { assert(*ptr); boost::recursive_mutex::scoped_lock pool_lock(get_pool_mutex(m)); std::map<void*, bool>& pool = get_pool(m); std::map<void*, bool>::iterator it = pool.find(*ptr); if (it == pool.end()) { default_alloc.dealloc(ptr, m); return; } // mark the memory as available assert(it->second == false); it->second = true; #ifndef NDEBUG std::map<void*, size_t>& pool_sizes = get_pool_sizes(m); assert(pool_sizes[*ptr] > 0); CUV_LOG_DEBUG( "released " << pool_sizes[*ptr] << " bytes in pool " << m_name << " (" << memtype(m) << "). total bytes: " << pool_size(m) << ". count: " << pool_count(m) <<", free: " << pool_free_count(m)); #endif *ptr = 0; } void pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space m) { if (memsize * valueSize < MIN_SIZE_HOST) { default_alloc.alloc(ptr, memsize, valueSize, m); } else { alloc_pooled(ptr, memsize, valueSize, m); } } void pooled_cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, host_memory_space m) { // not yet pooled default_alloc.alloc2d(ptr, pitch, height, width, valueSize, m); } void pooled_cuda_allocator::alloc2d(void** ptr, size_t& pitch, size_t height, size_t width, size_t valueSize, dev_memory_space m) { // not yet pooled default_alloc.alloc2d(ptr, pitch, height, width, valueSize, m); } void nan_pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, dev_memory_space m) { pooled_cuda_allocator::alloc(ptr, memsize, valueSize, m); if(valueSize == sizeof(float)){ // set everything to NaN thrust::device_ptr<float> begin((float*)*ptr); thrust::device_ptr<float> end(((float*)*ptr) + memsize); thrust::fill(begin, end, std::numeric_limits<float>::quiet_NaN()); } } void nan_pooled_cuda_allocator::alloc(void** ptr, size_t memsize, size_t valueSize, host_memory_space m) { pooled_cuda_allocator::alloc(ptr, memsize, valueSize, m); if(valueSize == sizeof(float)){ // set everything to NaN thrust::fill((float*)*ptr, ((float*)*ptr) + memsize, std::numeric_limits<float>::quiet_NaN()); } } } #define CUV_POOLED_CUDA_ALLOCATOR_INST(X) \ template size_t cuv::pooled_cuda_allocator::pool_count(X) const; \ template size_t cuv::pooled_cuda_allocator::pool_free_count(X) const; \ template size_t cuv::pooled_cuda_allocator::pool_size(X) const; CUV_POOLED_CUDA_ALLOCATOR_INST(cuv::dev_memory_space); CUV_POOLED_CUDA_ALLOCATOR_INST(cuv::host_memory_space);
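A minimal usage sketch for the pooled allocator defined above (a hypothetical driver program, not part of the library): it assumes the cuv header is reachable as "allocators.hpp", that a CUDA device is present, and that a 4 MB request exceeds the MIN_SIZE_DEV threshold so the pooled path is taken; the pool name "demo_pool" and the sizes are invented for illustration. It shows that dealloc() only marks a block as available and that a second request of the same size reuses it.

#include "allocators.hpp"
#include <cstdio>

int main() {
    cuv::pooled_cuda_allocator pool_alloc("demo_pool");   // the name only appears in debug logging

    void* a = 0;
    // first request of 2^20 floats: nothing is pooled yet, so fresh device memory is allocated
    pool_alloc.alloc(&a, 1 << 20, sizeof(float), cuv::dev_memory_space());

    // dealloc() does not free the block, it marks it as available inside the pool
    pool_alloc.dealloc(&a, cuv::dev_memory_space());
    std::printf("count=%zu free=%zu bytes=%zu\n",
                pool_alloc.pool_count(), pool_alloc.pool_free_count(), pool_alloc.pool_size());

    void* b = 0;
    // same-sized request: alloc_pooled() finds the exact match and hands the block back out
    pool_alloc.alloc(&b, 1 << 20, sizeof(float), cuv::dev_memory_space());
    pool_alloc.dealloc(&b, cuv::dev_memory_space());

    return 0;   // ~pooled_cuda_allocator() releases the pooled device memory
}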
7f04fefe7e9b7e369f69458be366b88a72f81d9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "emc.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/fill.h> __global__ void update_slices_kernel(real * images, real * slices, int * mask, real * respons, real * scaling, int N_images, int N_slices, int N_2d, real * slices_total_respons, real * rot, real * x_coord, real * y_coord, real * z_coord, real * model, real * weight, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, real * weights); __global__ void insert_slices_kernel(real * images, real * slices, int * mask, real * respons, real * scaling, int N_images, int N_slices, int N_2d, real * slices_total_respons, real * rot, real * x_coord, real * y_coord, real * z_coord, real * model, real * weight, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, real * weights); template<typename T> __device__ void inblock_reduce(T * data){ __syncthreads(); for(unsigned int s=blockDim.x/2; s>0; s>>=1){ if (threadIdx.x < s){ data[threadIdx.x] += data[threadIdx.x + s]; } __syncthreads(); } } template<typename T> __device__ void inblock_maximum(T * data){ __syncthreads(); for(unsigned int s=blockDim.x/2; s>0; s>>=1){ if (threadIdx.x < s){ if(data[threadIdx.x] < data[threadIdx.x + s]){ data[threadIdx.x] = data[threadIdx.x + s]; } } __syncthreads(); } } __device__ void cuda_get_slice(real *model, real *slice, real *rot, real *x_coordinates, real *y_coordinates, real *z_coordinates, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, int tid, int step) { const int x_max = slice_rows; const int y_max = slice_cols; //tabulate angle later real new_x, new_y, new_z; int round_x, round_y, round_z; for (int x = 0; x < x_max; x++) { for (int y = tid; y < y_max; y+=step) { /* This is just a matrix multiplication with rot */ new_x = (rot[0]*rot[0] + rot[1]*rot[1] - rot[2]*rot[2] - rot[3]*rot[3])*x_coordinates[y*x_max+x] + (2.0f*rot[1]*rot[2] - 2.0f*rot[0]*rot[3])*y_coordinates[y*x_max+x] + (2.0f*rot[1]*rot[3] + 2.0f*rot[0]*rot[2])*z_coordinates[y*x_max+x]; new_y = (2.0f*rot[1]*rot[2] + 2.0f*rot[0]*rot[3])*x_coordinates[y*x_max+x] + (rot[0]*rot[0] - rot[1]*rot[1] + rot[2]*rot[2] - rot[3]*rot[3])*y_coordinates[y*x_max+x] + (2.0f*rot[2]*rot[3] - 2.0f*rot[0]*rot[1])*z_coordinates[y*x_max+x]; new_z = (2.0f*rot[1]*rot[3] - 2.0f*rot[0]*rot[2])*x_coordinates[y*x_max+x] + (2.0f*rot[2]*rot[3] + 2.0f*rot[0]*rot[1])*y_coordinates[y*x_max+x] + (rot[0]*rot[0] - rot[1]*rot[1] - rot[2]*rot[2] + rot[3]*rot[3])*z_coordinates[y*x_max+x]; round_x = roundf(model_x/2.0f + 0.5f + new_x); round_y = roundf(model_y/2.0f + 0.5f + new_y); round_z = roundf(model_z/2.0f + 0.5f + new_z); if (round_x > 0 && round_x < model_x && round_y > 0 && round_y < model_y && round_z > 0 && round_z < model_z) { slice[y*x_max+x] = model[(round_z*model_x*model_y + round_y*model_x + round_x)]; }else{ slice[y*x_max+x] = 0.0f; } } } } __global__ void get_slices_kernel(real * model, real * slices, real *rot, real *x_coordinates, real *y_coordinates, real *z_coordinates, int slice_rows, int slice_cols, int model_x, int model_y, int model_z){ int bid = blockIdx.x; int i_slice = bid; int tid = threadIdx.x; int step = blockDim.x; int N_2d = slice_rows*slice_cols; cuda_get_slice(model,&slices[N_2d*i_slice],&rot[4*i_slice],x_coordinates, y_coordinates,z_coordinates,slice_rows,slice_cols,model_x,model_y, model_z,tid,step); } /* This responsability does not yet take scaling of 
patterns into accoutnt. */ __device__ void cuda_calculate_responsability_absolute(float *slice, float *image, int *mask, real sigma, real scaling, int N_2d, int tid, int step, real * sum_cache, int * count_cache) { real sum = 0.0; const int i_max = N_2d; int count = 0; for (int i = tid; i < i_max; i+=step) { if (mask[i] != 0) { sum += pow(slice[i] - image[i]/scaling,2); count++; } } sum_cache[tid] = sum; count_cache[tid] = count; // return -sum/2.0/(real)count/pow(sigma,2); //return in log scale. } __global__ void calculate_responsabilities_kernel(float * slices, float * images, int * mask, real sigma, real * scaling, real * respons, int N_2d){ __shared__ real sum_cache[256]; __shared__ int count_cache[256]; int tid = threadIdx.x; int step = blockDim.x; int i_image = blockIdx.x; int i_slice = blockIdx.y; int N_images = gridDim.x; cuda_calculate_responsability_absolute(&slices[i_slice*N_2d], &images[i_image*N_2d],mask, sigma,scaling[i_image], N_2d, tid,step, sum_cache,count_cache); inblock_reduce(sum_cache); inblock_reduce(count_cache); if(tid == 0){ respons[i_slice*N_images+i_image] = -sum_cache[0]/2.0/(real)count_cache[0]/pow(sigma,2); } } void cuda_calculate_responsabilities(real * d_slices, real * d_images, int * d_mask, real sigma, real * d_scaling, real * d_respons, int N_2d, int N_images, int N_slices, real * respons){ hipEvent_t begin; hipEvent_t end; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord (begin,0); dim3 nblocks(N_images,N_slices); int nthreads = 256; hipEvent_t k_begin; hipEvent_t k_end; hipEventCreate(&k_begin); hipEventCreate(&k_end); hipEventRecord (k_begin,0); hipLaunchKernelGGL(( calculate_responsabilities_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_slices,d_images,d_mask, sigma,d_scaling,d_respons, N_2d); hipEventRecord(k_end,0); hipEventSynchronize(k_end); real k_ms; hipEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel calc respons time = %fms\n",k_ms); hipError_t status = hipGetLastError(); if(status != hipSuccess){ printf("CUDA Error: %s\n",hipGetErrorString(status)); } hipMemcpy(respons,d_respons,sizeof(real)*N_slices*N_images,hipMemcpyDeviceToHost); real respons_sum = 0; for(int i = 0;i<N_slices*N_images;i++){ respons_sum += respons[i]; } printf("respons_sum = %f\n",respons_sum); hipEventRecord(end,0); hipEventSynchronize (end); real ms; hipEventElapsedTime (&ms, begin, end); printf("cuda calc respons time = %fms\n",ms); } __global__ void slice_weighting_kernel(real * images, real * slices,int * mask, real * respons, real * scaling, int N_slices, int N_2d, int N_images){ __shared__ real image_power[256]; __shared__ real correlation[256]; int bid = blockIdx.x; int tid = threadIdx.x; int step = blockDim.x; int i_image = bid; real weighted_power = 0; image_power[tid] = 0.0; for (int i = tid; i < N_2d; i+=step) { if (mask[i] != 0) { image_power[tid] += pow(images[i_image*N_2d+i],2); } } inblock_reduce(image_power); for (int i_slice = 0; i_slice < N_slices; i_slice++) { correlation[tid] = 0.0; for (int i = tid; i < N_2d; i+=step) { if (mask[i] != 0) { correlation[tid] += images[i_image*N_2d+i]*slices[i_slice*N_2d+i]; } } inblock_reduce(correlation); if(tid == 0){ weighted_power += respons[i_slice*N_images+i_image]*correlation[tid]; } } if(tid == 0){ scaling[i_image] = image_power[tid]/weighted_power; } } void cuda_update_scaling(real * d_images, real * d_slices, int * d_mask, real * d_respons, real * d_scaling, int N_images, int N_slices, int N_2d, real * scaling){ hipEvent_t begin; hipEvent_t end; hipEventCreate(&begin); 
hipEventCreate(&end); hipEventRecord (begin,0); int nblocks = N_images; int nthreads = 256; hipEvent_t k_begin; hipEvent_t k_end; hipEventCreate(&k_begin); hipEventCreate(&k_end); hipEventRecord (k_begin,0); hipLaunchKernelGGL(( slice_weighting_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_images,d_slices,d_mask, d_respons, d_scaling, N_slices,N_2d, N_images); hipMemcpy(scaling,d_scaling,sizeof(real)*N_images,hipMemcpyDeviceToHost); hipEventRecord(k_end,0); hipEventSynchronize(k_end); real k_ms; hipEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel update scaling time = %fms\n",k_ms); hipError_t status = hipGetLastError(); if(status != hipSuccess){ printf("CUDA Error: %s\n",hipGetErrorString(status)); } hipEventRecord(end,0); hipEventSynchronize (end); real ms; hipEventElapsedTime (&ms, begin, end); printf("cuda update scaling time = %fms\n",ms); } void cuda_get_slices(sp_3matrix * model, real * d_model, real * d_slices, real * d_rot, real * d_x_coordinates, real * d_y_coordinates, real * d_z_coordinates, int N_slices){ hipEvent_t begin; hipEvent_t end; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord (begin,0); int rows = sp_3matrix_x(model); int cols = sp_3matrix_y(model); int N_2d = sp_3matrix_x(model)*sp_3matrix_y(model); int nblocks = N_slices; int nthreads = 256; hipLaunchKernelGGL(( get_slices_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_model, d_slices, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates, rows,cols, sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model)); hipEventRecord(end,0); hipEventSynchronize (end); real ms; hipEventElapsedTime (&ms, begin, end); printf("cuda get slices time = %fms\n",ms); } real cuda_update_slices(real * d_images, real * d_slices, int * d_mask, real * d_respons, real * d_scaling, int N_images, int N_slices, int N_2d, sp_3matrix * model, real * d_model, real *d_x_coordinates, real *d_y_coordinates, real *d_z_coordinates, real *d_rot, real * weights, real * d_weight, Setup setup, sp_matrix ** images){ hipEvent_t begin; hipEvent_t end; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord (begin,0); dim3 nblocks = N_slices; int nthreads = 256; real * d_slices_total_respons; hipMalloc(&d_slices_total_respons,sizeof(real)*N_slices); real * d_weights; hipMalloc(&d_weights,sizeof(real)*N_slices); hipMemcpy(d_weights,weights,sizeof(real)*N_slices,hipMemcpyHostToDevice); hipEvent_t k_begin; hipEvent_t k_end; hipEventCreate(&k_begin); hipEventCreate(&k_end); hipEventRecord (k_begin,0); hipLaunchKernelGGL(( update_slices_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_images, d_slices, d_mask, d_respons, d_scaling, N_images, N_slices, N_2d, d_slices_total_respons, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates,d_model, d_weight, sp_matrix_rows(images[0]),sp_matrix_cols(images[0]), sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model),d_weights); hipDeviceSynchronize(); hipLaunchKernelGGL(( insert_slices_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_images, d_slices, d_mask, d_respons, d_scaling, N_images, N_slices, N_2d, d_slices_total_respons, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates,d_model, d_weight, sp_matrix_rows(images[0]),sp_matrix_cols(images[0]), sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model),d_weights); hipEventRecord(k_end,0); hipEventSynchronize(k_end); real k_ms; hipEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel slice update time = %fms\n",k_ms); hipError_t status = hipGetLastError(); if(status != hipSuccess){ printf("CUDA Error: 
%s\n",hipGetErrorString(status)); } real slices_total_respons[N_slices]; hipMemcpy(slices_total_respons,d_slices_total_respons,sizeof(real)*N_slices, hipMemcpyDeviceToHost); real overal_respons = 0.0; for (int i_slice = 0; i_slice < N_slices; i_slice++) { overal_respons += slices_total_respons[i_slice]; } hipMemcpy(model->data,d_model,sizeof(real)*sp_3matrix_size(model),hipMemcpyDeviceToHost); hipFree(d_slices_total_respons); hipFree(d_weights); hipEventRecord(end,0); hipEventSynchronize (end); real ms; hipEventElapsedTime (&ms, begin, end); printf("cuda slice update time = %fms\n",ms); return overal_respons; } real cuda_model_max(real * model, int model_size){ thrust::device_ptr<real> p(model); real max = thrust::reduce(p, p+model_size, real(0), thrust::maximum<real>()); return max; } void cuda_allocate_slices(real ** slices,Setup setup,int N_slices){ hipMalloc(slices,sizeof(real)*setup.side*setup.side*N_slices); } void cuda_allocate_model(real ** d_model, sp_3matrix * model){ hipMalloc(d_model,sizeof(real)*sp_3matrix_size(model)); hipMemcpy(*d_model,model->data,sizeof(real)*sp_3matrix_size(model),hipMemcpyHostToDevice); } void cuda_allocate_mask(int ** d_mask, sp_imatrix * mask){ hipMalloc(d_mask,sizeof(int)*sp_imatrix_size(mask)); hipMemcpy(*d_mask,mask->data,sizeof(int)*sp_imatrix_size(mask),hipMemcpyHostToDevice); } void cuda_allocate_rotations(real ** d_rotations, Quaternion ** rotations, int N_slices){ hipMalloc(d_rotations,sizeof(real)*4*N_slices); for(int i = 0;i<N_slices;i++){ hipMemcpy(&(*d_rotations)[4*i],rotations[i]->q,sizeof(real)*4,hipMemcpyHostToDevice); } } void cuda_allocate_images(real ** d_images, sp_matrix ** images, int N_images){ hipMalloc(d_images,sizeof(real)*sp_matrix_size(images[0])*N_images); for(int i = 0;i<N_images;i++){ hipMemcpy(&(*d_images)[sp_matrix_size(images[0])*i],images[i]->data,sizeof(real)*sp_matrix_size(images[0]),hipMemcpyHostToDevice); } } void cuda_allocate_coords(real ** d_x, real ** d_y, real ** d_z, sp_matrix * x, sp_matrix * y, sp_matrix * z){ hipMalloc(d_x,sizeof(real)*sp_matrix_size(x)); hipMalloc(d_y,sizeof(real)*sp_matrix_size(x)); hipMalloc(d_z,sizeof(real)*sp_matrix_size(x)); hipMemcpy(*d_x,x->data,sizeof(real)*sp_matrix_size(x),hipMemcpyHostToDevice); hipMemcpy(*d_y,y->data,sizeof(real)*sp_matrix_size(x),hipMemcpyHostToDevice); hipMemcpy(*d_z,z->data,sizeof(real)*sp_matrix_size(x),hipMemcpyHostToDevice); } void cuda_reset_model(sp_3matrix * model, real * d_model){ hipMemset(d_model,0,sizeof(real)*sp_3matrix_size(model)); } __global__ void cuda_normalize_model_kernel(real * model, real * weight, int n){ int i = threadIdx.x + blockIdx.x*blockDim.x; if(weight[i] > 0.0f){ model[i] /= weight[i]; }else{ model[i] = 0.0f; } } void cuda_normalize_model(sp_3matrix * model, real * d_model, real * d_weight){ int n = sp_3matrix_size(model); int nthreads = 256; int nblocks = (n+nthreads-1)/nthreads; hipLaunchKernelGGL(( cuda_normalize_model_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_model,d_weight,n); hipDeviceSynchronize(); thrust::device_ptr<real> p(d_model); real model_sum = thrust::reduce(p, p+n, real(0), thrust::plus<real>()); model_sum /= n; /* model /= model_sum; */ thrust::transform(p, p+n,thrust::make_constant_iterator(1.0f/model_sum), p, thrust::multiplies<real>()); } void cuda_allocate_real(real ** x, int n){ hipMalloc(x,n); } void cuda_allocate_scaling(real ** d_scaling, int N_images){ hipMalloc(d_scaling,N_images*sizeof(real)); thrust::device_ptr<real> p(*d_scaling); thrust::fill(p, p+N_images, real(1)); } __global__ void 
cuda_normalize_responsabilities_kernel(real * respons, int N_slices, int N_images){ __shared__ real cache[256]; int i_image = blockIdx.x; int tid = threadIdx.x; int step = blockDim.x; cache[tid] = -1.0e10f; for(int i_slice = tid;i_slice < N_slices;i_slice += step){ if(cache[tid] < respons[i_slice*N_images+i_image]){ cache[tid] = respons[i_slice*N_images+i_image]; } } inblock_maximum(cache); real max_resp = cache[0]; for (int i_slice = tid; i_slice < N_slices; i_slice+= step) { respons[i_slice*N_images+i_image] -= max_resp; } cache[tid] = 0; for (int i_slice = tid; i_slice < N_slices; i_slice+=step) { if (respons[i_slice*N_images+i_image] > -1.0e10f) { respons[i_slice*N_images+i_image] = expf(respons[i_slice*N_images+i_image]); cache[tid] += respons[i_slice*N_images+i_image]; } else { respons[i_slice*N_images+i_image] = 0.0f; } } inblock_reduce(cache); real sum = cache[0]; for (int i_slice = tid; i_slice < N_slices; i_slice+=step) { respons[i_slice*N_images+i_image] /= sum; } } void cuda_normalize_responsabilities(real * d_respons, int N_slices, int N_images){ int nblocks = N_images; int nthreads = 256; hipLaunchKernelGGL(( cuda_normalize_responsabilities_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_respons,N_slices,N_images); } // x_log_x<T> computes the f(x) -> x*log(x) template <typename T> struct x_log_x { __host__ __device__ T operator()(const T& x) const { if(x > 0){ return x * logf(x); }else{ return 0; } } }; real cuda_total_respons(real * d_respons, real * respons,int n){ thrust::device_ptr<real> p(d_respons); x_log_x<real> unary_op; thrust::plus<real> binary_op; real init = 0; // Calculates sum_0^n d_respons*log(d_respons) return thrust::transform_reduce(p, p+n, unary_op, init, binary_op); }
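cuda_total_respons above reduces sum_i r_i*log(r_i) over the responsibilities with Thrust and the x_log_x functor. A self-contained sketch of the same reduction on a small invented vector (values and size are hypothetical, real is taken as float); it uses only the Thrust calls already present above, so it builds as-is under nvcc and, via rocThrust, under hipcc.

#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include <cstdio>

// same x*log(x) mapping as the x_log_x functor above, fixed to float for the sketch
struct x_log_x_f {
    __host__ __device__ float operator()(const float& x) const {
        return x > 0.0f ? x * logf(x) : 0.0f;
    }
};

int main() {
    // hypothetical normalized responsibilities of one image over four slices
    thrust::device_vector<float> respons(4);
    respons[0] = 0.1f; respons[1] = 0.2f; respons[2] = 0.3f; respons[3] = 0.4f;

    float total = thrust::transform_reduce(respons.begin(), respons.end(),
                                           x_log_x_f(), 0.0f, thrust::plus<float>());
    std::printf("sum r*log(r) = %f\n", total);   // about -1.28 for these values
    return 0;
}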
7f04fefe7e9b7e369f69458be366b88a72f81d9d.cu
#include "emc.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/fill.h> __global__ void update_slices_kernel(real * images, real * slices, int * mask, real * respons, real * scaling, int N_images, int N_slices, int N_2d, real * slices_total_respons, real * rot, real * x_coord, real * y_coord, real * z_coord, real * model, real * weight, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, real * weights); __global__ void insert_slices_kernel(real * images, real * slices, int * mask, real * respons, real * scaling, int N_images, int N_slices, int N_2d, real * slices_total_respons, real * rot, real * x_coord, real * y_coord, real * z_coord, real * model, real * weight, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, real * weights); template<typename T> __device__ void inblock_reduce(T * data){ __syncthreads(); for(unsigned int s=blockDim.x/2; s>0; s>>=1){ if (threadIdx.x < s){ data[threadIdx.x] += data[threadIdx.x + s]; } __syncthreads(); } } template<typename T> __device__ void inblock_maximum(T * data){ __syncthreads(); for(unsigned int s=blockDim.x/2; s>0; s>>=1){ if (threadIdx.x < s){ if(data[threadIdx.x] < data[threadIdx.x + s]){ data[threadIdx.x] = data[threadIdx.x + s]; } } __syncthreads(); } } __device__ void cuda_get_slice(real *model, real *slice, real *rot, real *x_coordinates, real *y_coordinates, real *z_coordinates, int slice_rows, int slice_cols, int model_x, int model_y, int model_z, int tid, int step) { const int x_max = slice_rows; const int y_max = slice_cols; //tabulate angle later real new_x, new_y, new_z; int round_x, round_y, round_z; for (int x = 0; x < x_max; x++) { for (int y = tid; y < y_max; y+=step) { /* This is just a matrix multiplication with rot */ new_x = (rot[0]*rot[0] + rot[1]*rot[1] - rot[2]*rot[2] - rot[3]*rot[3])*x_coordinates[y*x_max+x] + (2.0f*rot[1]*rot[2] - 2.0f*rot[0]*rot[3])*y_coordinates[y*x_max+x] + (2.0f*rot[1]*rot[3] + 2.0f*rot[0]*rot[2])*z_coordinates[y*x_max+x]; new_y = (2.0f*rot[1]*rot[2] + 2.0f*rot[0]*rot[3])*x_coordinates[y*x_max+x] + (rot[0]*rot[0] - rot[1]*rot[1] + rot[2]*rot[2] - rot[3]*rot[3])*y_coordinates[y*x_max+x] + (2.0f*rot[2]*rot[3] - 2.0f*rot[0]*rot[1])*z_coordinates[y*x_max+x]; new_z = (2.0f*rot[1]*rot[3] - 2.0f*rot[0]*rot[2])*x_coordinates[y*x_max+x] + (2.0f*rot[2]*rot[3] + 2.0f*rot[0]*rot[1])*y_coordinates[y*x_max+x] + (rot[0]*rot[0] - rot[1]*rot[1] - rot[2]*rot[2] + rot[3]*rot[3])*z_coordinates[y*x_max+x]; round_x = roundf(model_x/2.0f + 0.5f + new_x); round_y = roundf(model_y/2.0f + 0.5f + new_y); round_z = roundf(model_z/2.0f + 0.5f + new_z); if (round_x > 0 && round_x < model_x && round_y > 0 && round_y < model_y && round_z > 0 && round_z < model_z) { slice[y*x_max+x] = model[(round_z*model_x*model_y + round_y*model_x + round_x)]; }else{ slice[y*x_max+x] = 0.0f; } } } } __global__ void get_slices_kernel(real * model, real * slices, real *rot, real *x_coordinates, real *y_coordinates, real *z_coordinates, int slice_rows, int slice_cols, int model_x, int model_y, int model_z){ int bid = blockIdx.x; int i_slice = bid; int tid = threadIdx.x; int step = blockDim.x; int N_2d = slice_rows*slice_cols; cuda_get_slice(model,&slices[N_2d*i_slice],&rot[4*i_slice],x_coordinates, y_coordinates,z_coordinates,slice_rows,slice_cols,model_x,model_y, model_z,tid,step); } /* This responsability does not yet take scaling of patterns into accoutnt. 
*/ __device__ void cuda_calculate_responsability_absolute(float *slice, float *image, int *mask, real sigma, real scaling, int N_2d, int tid, int step, real * sum_cache, int * count_cache) { real sum = 0.0; const int i_max = N_2d; int count = 0; for (int i = tid; i < i_max; i+=step) { if (mask[i] != 0) { sum += pow(slice[i] - image[i]/scaling,2); count++; } } sum_cache[tid] = sum; count_cache[tid] = count; // return -sum/2.0/(real)count/pow(sigma,2); //return in log scale. } __global__ void calculate_responsabilities_kernel(float * slices, float * images, int * mask, real sigma, real * scaling, real * respons, int N_2d){ __shared__ real sum_cache[256]; __shared__ int count_cache[256]; int tid = threadIdx.x; int step = blockDim.x; int i_image = blockIdx.x; int i_slice = blockIdx.y; int N_images = gridDim.x; cuda_calculate_responsability_absolute(&slices[i_slice*N_2d], &images[i_image*N_2d],mask, sigma,scaling[i_image], N_2d, tid,step, sum_cache,count_cache); inblock_reduce(sum_cache); inblock_reduce(count_cache); if(tid == 0){ respons[i_slice*N_images+i_image] = -sum_cache[0]/2.0/(real)count_cache[0]/pow(sigma,2); } } void cuda_calculate_responsabilities(real * d_slices, real * d_images, int * d_mask, real sigma, real * d_scaling, real * d_respons, int N_2d, int N_images, int N_slices, real * respons){ cudaEvent_t begin; cudaEvent_t end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord (begin,0); dim3 nblocks(N_images,N_slices); int nthreads = 256; cudaEvent_t k_begin; cudaEvent_t k_end; cudaEventCreate(&k_begin); cudaEventCreate(&k_end); cudaEventRecord (k_begin,0); calculate_responsabilities_kernel<<<nblocks,nthreads>>>(d_slices,d_images,d_mask, sigma,d_scaling,d_respons, N_2d); cudaEventRecord(k_end,0); cudaEventSynchronize(k_end); real k_ms; cudaEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel calc respons time = %fms\n",k_ms); cudaError_t status = cudaGetLastError(); if(status != cudaSuccess){ printf("CUDA Error: %s\n",cudaGetErrorString(status)); } cudaMemcpy(respons,d_respons,sizeof(real)*N_slices*N_images,cudaMemcpyDeviceToHost); real respons_sum = 0; for(int i = 0;i<N_slices*N_images;i++){ respons_sum += respons[i]; } printf("respons_sum = %f\n",respons_sum); cudaEventRecord(end,0); cudaEventSynchronize (end); real ms; cudaEventElapsedTime (&ms, begin, end); printf("cuda calc respons time = %fms\n",ms); } __global__ void slice_weighting_kernel(real * images, real * slices,int * mask, real * respons, real * scaling, int N_slices, int N_2d, int N_images){ __shared__ real image_power[256]; __shared__ real correlation[256]; int bid = blockIdx.x; int tid = threadIdx.x; int step = blockDim.x; int i_image = bid; real weighted_power = 0; image_power[tid] = 0.0; for (int i = tid; i < N_2d; i+=step) { if (mask[i] != 0) { image_power[tid] += pow(images[i_image*N_2d+i],2); } } inblock_reduce(image_power); for (int i_slice = 0; i_slice < N_slices; i_slice++) { correlation[tid] = 0.0; for (int i = tid; i < N_2d; i+=step) { if (mask[i] != 0) { correlation[tid] += images[i_image*N_2d+i]*slices[i_slice*N_2d+i]; } } inblock_reduce(correlation); if(tid == 0){ weighted_power += respons[i_slice*N_images+i_image]*correlation[tid]; } } if(tid == 0){ scaling[i_image] = image_power[tid]/weighted_power; } } void cuda_update_scaling(real * d_images, real * d_slices, int * d_mask, real * d_respons, real * d_scaling, int N_images, int N_slices, int N_2d, real * scaling){ cudaEvent_t begin; cudaEvent_t end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord (begin,0); 
int nblocks = N_images; int nthreads = 256; cudaEvent_t k_begin; cudaEvent_t k_end; cudaEventCreate(&k_begin); cudaEventCreate(&k_end); cudaEventRecord (k_begin,0); slice_weighting_kernel<<<nblocks,nthreads>>>(d_images,d_slices,d_mask, d_respons, d_scaling, N_slices,N_2d, N_images); cudaMemcpy(scaling,d_scaling,sizeof(real)*N_images,cudaMemcpyDeviceToHost); cudaEventRecord(k_end,0); cudaEventSynchronize(k_end); real k_ms; cudaEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel update scaling time = %fms\n",k_ms); cudaError_t status = cudaGetLastError(); if(status != cudaSuccess){ printf("CUDA Error: %s\n",cudaGetErrorString(status)); } cudaEventRecord(end,0); cudaEventSynchronize (end); real ms; cudaEventElapsedTime (&ms, begin, end); printf("cuda update scaling time = %fms\n",ms); } void cuda_get_slices(sp_3matrix * model, real * d_model, real * d_slices, real * d_rot, real * d_x_coordinates, real * d_y_coordinates, real * d_z_coordinates, int N_slices){ cudaEvent_t begin; cudaEvent_t end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord (begin,0); int rows = sp_3matrix_x(model); int cols = sp_3matrix_y(model); int N_2d = sp_3matrix_x(model)*sp_3matrix_y(model); int nblocks = N_slices; int nthreads = 256; get_slices_kernel<<<nblocks,nthreads>>>(d_model, d_slices, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates, rows,cols, sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model)); cudaEventRecord(end,0); cudaEventSynchronize (end); real ms; cudaEventElapsedTime (&ms, begin, end); printf("cuda get slices time = %fms\n",ms); } real cuda_update_slices(real * d_images, real * d_slices, int * d_mask, real * d_respons, real * d_scaling, int N_images, int N_slices, int N_2d, sp_3matrix * model, real * d_model, real *d_x_coordinates, real *d_y_coordinates, real *d_z_coordinates, real *d_rot, real * weights, real * d_weight, Setup setup, sp_matrix ** images){ cudaEvent_t begin; cudaEvent_t end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord (begin,0); dim3 nblocks = N_slices; int nthreads = 256; real * d_slices_total_respons; cudaMalloc(&d_slices_total_respons,sizeof(real)*N_slices); real * d_weights; cudaMalloc(&d_weights,sizeof(real)*N_slices); cudaMemcpy(d_weights,weights,sizeof(real)*N_slices,cudaMemcpyHostToDevice); cudaEvent_t k_begin; cudaEvent_t k_end; cudaEventCreate(&k_begin); cudaEventCreate(&k_end); cudaEventRecord (k_begin,0); update_slices_kernel<<<nblocks,nthreads>>>(d_images, d_slices, d_mask, d_respons, d_scaling, N_images, N_slices, N_2d, d_slices_total_respons, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates,d_model, d_weight, sp_matrix_rows(images[0]),sp_matrix_cols(images[0]), sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model),d_weights); cudaThreadSynchronize(); insert_slices_kernel<<<nblocks,nthreads>>>(d_images, d_slices, d_mask, d_respons, d_scaling, N_images, N_slices, N_2d, d_slices_total_respons, d_rot,d_x_coordinates, d_y_coordinates,d_z_coordinates,d_model, d_weight, sp_matrix_rows(images[0]),sp_matrix_cols(images[0]), sp_3matrix_x(model),sp_3matrix_y(model), sp_3matrix_z(model),d_weights); cudaEventRecord(k_end,0); cudaEventSynchronize(k_end); real k_ms; cudaEventElapsedTime (&k_ms, k_begin, k_end); printf("cuda kernel slice update time = %fms\n",k_ms); cudaError_t status = cudaGetLastError(); if(status != cudaSuccess){ printf("CUDA Error: %s\n",cudaGetErrorString(status)); } real slices_total_respons[N_slices]; cudaMemcpy(slices_total_respons,d_slices_total_respons,sizeof(real)*N_slices, 
cudaMemcpyDeviceToHost); real overal_respons = 0.0; for (int i_slice = 0; i_slice < N_slices; i_slice++) { overal_respons += slices_total_respons[i_slice]; } cudaMemcpy(model->data,d_model,sizeof(real)*sp_3matrix_size(model),cudaMemcpyDeviceToHost); cudaFree(d_slices_total_respons); cudaFree(d_weights); cudaEventRecord(end,0); cudaEventSynchronize (end); real ms; cudaEventElapsedTime (&ms, begin, end); printf("cuda slice update time = %fms\n",ms); return overal_respons; } real cuda_model_max(real * model, int model_size){ thrust::device_ptr<real> p(model); real max = thrust::reduce(p, p+model_size, real(0), thrust::maximum<real>()); return max; } void cuda_allocate_slices(real ** slices,Setup setup,int N_slices){ cudaMalloc(slices,sizeof(real)*setup.side*setup.side*N_slices); } void cuda_allocate_model(real ** d_model, sp_3matrix * model){ cudaMalloc(d_model,sizeof(real)*sp_3matrix_size(model)); cudaMemcpy(*d_model,model->data,sizeof(real)*sp_3matrix_size(model),cudaMemcpyHostToDevice); } void cuda_allocate_mask(int ** d_mask, sp_imatrix * mask){ cudaMalloc(d_mask,sizeof(int)*sp_imatrix_size(mask)); cudaMemcpy(*d_mask,mask->data,sizeof(int)*sp_imatrix_size(mask),cudaMemcpyHostToDevice); } void cuda_allocate_rotations(real ** d_rotations, Quaternion ** rotations, int N_slices){ cudaMalloc(d_rotations,sizeof(real)*4*N_slices); for(int i = 0;i<N_slices;i++){ cudaMemcpy(&(*d_rotations)[4*i],rotations[i]->q,sizeof(real)*4,cudaMemcpyHostToDevice); } } void cuda_allocate_images(real ** d_images, sp_matrix ** images, int N_images){ cudaMalloc(d_images,sizeof(real)*sp_matrix_size(images[0])*N_images); for(int i = 0;i<N_images;i++){ cudaMemcpy(&(*d_images)[sp_matrix_size(images[0])*i],images[i]->data,sizeof(real)*sp_matrix_size(images[0]),cudaMemcpyHostToDevice); } } void cuda_allocate_coords(real ** d_x, real ** d_y, real ** d_z, sp_matrix * x, sp_matrix * y, sp_matrix * z){ cudaMalloc(d_x,sizeof(real)*sp_matrix_size(x)); cudaMalloc(d_y,sizeof(real)*sp_matrix_size(x)); cudaMalloc(d_z,sizeof(real)*sp_matrix_size(x)); cudaMemcpy(*d_x,x->data,sizeof(real)*sp_matrix_size(x),cudaMemcpyHostToDevice); cudaMemcpy(*d_y,y->data,sizeof(real)*sp_matrix_size(x),cudaMemcpyHostToDevice); cudaMemcpy(*d_z,z->data,sizeof(real)*sp_matrix_size(x),cudaMemcpyHostToDevice); } void cuda_reset_model(sp_3matrix * model, real * d_model){ cudaMemset(d_model,0,sizeof(real)*sp_3matrix_size(model)); } __global__ void cuda_normalize_model_kernel(real * model, real * weight, int n){ int i = threadIdx.x + blockIdx.x*blockDim.x; if(weight[i] > 0.0f){ model[i] /= weight[i]; }else{ model[i] = 0.0f; } } void cuda_normalize_model(sp_3matrix * model, real * d_model, real * d_weight){ int n = sp_3matrix_size(model); int nthreads = 256; int nblocks = (n+nthreads-1)/nthreads; cuda_normalize_model_kernel<<<nblocks,nthreads>>>(d_model,d_weight,n); cudaThreadSynchronize(); thrust::device_ptr<real> p(d_model); real model_sum = thrust::reduce(p, p+n, real(0), thrust::plus<real>()); model_sum /= n; /* model /= model_sum; */ thrust::transform(p, p+n,thrust::make_constant_iterator(1.0f/model_sum), p, thrust::multiplies<real>()); } void cuda_allocate_real(real ** x, int n){ cudaMalloc(x,n); } void cuda_allocate_scaling(real ** d_scaling, int N_images){ cudaMalloc(d_scaling,N_images*sizeof(real)); thrust::device_ptr<real> p(*d_scaling); thrust::fill(p, p+N_images, real(1)); } __global__ void cuda_normalize_responsabilities_kernel(real * respons, int N_slices, int N_images){ __shared__ real cache[256]; int i_image = blockIdx.x; int tid = threadIdx.x; 
int step = blockDim.x; cache[tid] = -1.0e10f; for(int i_slice = tid;i_slice < N_slices;i_slice += step){ if(cache[tid] < respons[i_slice*N_images+i_image]){ cache[tid] = respons[i_slice*N_images+i_image]; } } inblock_maximum(cache); real max_resp = cache[0]; for (int i_slice = tid; i_slice < N_slices; i_slice+= step) { respons[i_slice*N_images+i_image] -= max_resp; } cache[tid] = 0; for (int i_slice = tid; i_slice < N_slices; i_slice+=step) { if (respons[i_slice*N_images+i_image] > -1.0e10f) { respons[i_slice*N_images+i_image] = expf(respons[i_slice*N_images+i_image]); cache[tid] += respons[i_slice*N_images+i_image]; } else { respons[i_slice*N_images+i_image] = 0.0f; } } inblock_reduce(cache); real sum = cache[0]; for (int i_slice = tid; i_slice < N_slices; i_slice+=step) { respons[i_slice*N_images+i_image] /= sum; } } void cuda_normalize_responsabilities(real * d_respons, int N_slices, int N_images){ int nblocks = N_images; int nthreads = 256; cuda_normalize_responsabilities_kernel<<<nblocks,nthreads>>>(d_respons,N_slices,N_images); } // x_log_x<T> computes the f(x) -> x*log(x) template <typename T> struct x_log_x { __host__ __device__ T operator()(const T& x) const { if(x > 0){ return x * logf(x); }else{ return 0; } } }; real cuda_total_respons(real * d_respons, real * respons,int n){ thrust::device_ptr<real> p(d_respons); x_log_x<real> unary_op; thrust::plus<real> binary_op; real init = 0; // Calculates sum_0^n d_respons*log(d_respons) return thrust::transform_reduce(p, p+n, unary_op, init, binary_op); }
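Every per-block sum and maximum in the kernels above goes through the inblock_reduce / inblock_maximum templates: a shared-memory tree reduction that halves the number of active threads at each step, which is why the kernels use a power-of-two block size of 256. A stripped-down, self-contained CUDA sketch of that pattern follows; the kernel name, sizes and data are hypothetical.

#include <cuda_runtime.h>
#include <cstdio>

// one block sums n floats: each thread first accumulates a strided partial sum,
// then the same halving loop as inblock_reduce combines the 256 partials
__global__ void block_sum(const float* in, float* out, int n) {
    __shared__ float cache[256];
    int tid = threadIdx.x;
    float s = 0.0f;
    for (int i = tid; i < n; i += blockDim.x)
        s += in[i];
    cache[tid] = s;
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            cache[tid] += cache[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        *out = cache[0];
}

int main() {
    const int n = 1000;
    float h_in[n], h_out = 0.0f;
    for (int i = 0; i < n; i++) h_in[i] = 1.0f;
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    block_sum<<<1, 256>>>(d_in, d_out, n);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("block sum = %f (expected %d)\n", h_out, n);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}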
97266010204d61767c4582046ad0ef447ae1b4fe.hip
// !!! This is a file automatically generated by hipify!!! #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, GL #include <GL/glew.h> #include <GL/glut.h> // includes #include <hip/hip_runtime.h> #include <cutil.h> #include <cutil_gl_error.h> #include <cudaGL.h> //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 8; const unsigned int mesh_height = 9; // vbo variables GLuint *vboId; float anim = 0.0; // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; int flagG = 0; const float vertices [] = {1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, //0-1-2-3 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0,//4-7-6-5 -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0,//7-4-3-2 -1.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0,//1-6-7-2 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, //0-5-6-1 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, //0-3-4-5 }; const float normals [] = {0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,// v0-v1-v2-v3 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0,// v4-v7-v6-v5 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0,// v7-v4-v3-v2 -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0,// v1-v6-v7-v2 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,// v0-v5-v6-v1 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,// v0-v3-v4-v5 }; const float colors [] = {1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,// v0-v1-v2-v3 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, // v4-v7-v6-v5 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, // v7-v4-v3-v2 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, // v1-v6-v7-v2 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, // v0-v5-v6-v1 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, // v0-v3-v4-v5 }; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // GL functionality CUTBoolean initGL(); void createVBO( GLuint* vbo); void deleteVBO( GLuint* vbo); // rendering callbacks void display(); void keyboard( unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); // Cuda functionality void runCuda( GLuint* vbo, hipFunction_t knl); hipFunction_t kernel; hipDevice_t cuDevice; hipCtx_t cuContext; hipModule_t cuModule; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Init for CUDA //////////////////////////////////////////////////////////////////////////////// static hipError_t initCuda(hipFunction_t *pKernel, int argc, char** argv) { hipFunction_t cuFunction = 0; CUT_DEVICE_INIT_DRV(cuDevice); hipError_t status = hipCtxCreate( &cuContext, 0, cuDevice ); printf ("Ctx Creation: %d\n", status); if ( hipSuccess != status ) { hipCtxDetach(cuContext); return status; } status = hipModuleLoad(&cuModule, "data/cube_kernel.cubin"); printf ("ModuleLoad: %d\n", status); // cutFree(module_path); if ( hipSuccess != status ) { hipCtxDetach(cuContext); return status; } // Global function status = hipModuleGetFunction( &cuFunction, cuModule, "kernel" ); printf("function loading: %d\n", status); if ( hipSuccess != status) { hipCtxDetach(cuContext); return status; } *pKernel = cuFunction; return hipSuccess; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // Create GL context glutInit( &argc, argv); glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize( window_width, window_height); glutCreateWindow( "Cuda GL interop"); // initialize GL if( CUTFalse == initGL()) { return; } // register callbacks glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); // Cuda Driver Init kernel = NULL; CU_SAFE_CALL(initCuda(&kernel, argc, argv)); // create VBO createVBO(vboId); printf ("INITIALIZE OF SIMPLEL\n"); // run the cuda part runCuda(vboId, kernel); // start rendering mainloop glutMainLoop(); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda( GLuint* vbo, hipFunction_t func) { if(flagG==0) flagG = 1; else flagG = 0; // map OpenGL buffer object for writing from CUDA hipDeviceptr_t dptr; unsigned int size; CU_SAFE_CALL(hipGLMapBufferObject( &dptr, &size, *vbo)); //printf("runCuda (vbo, anim) = %d, %f\n", *vbo, anim); // execute the func int BLOCK_SIZE_X = 8; int BLOCK_SIZE_Y = 9; hipDeviceptr_t dim_dx; hipDeviceptr_t dim_dy; cuMemAlloc(&dim_dx, sizeof(float)); cuMemAlloc(&dim_dy, sizeof(float)); // setup execution parameters CU_SAFE_CALL(hipFuncSetBlockShape( func, BLOCK_SIZE_X, BLOCK_SIZE_Y, 1 )); CU_SAFE_CALL(hipFuncSetSharedSize( func, 3*BLOCK_SIZE_X*BLOCK_SIZE_Y*sizeof(float) ) ); CU_SAFE_CALL(hipParamSeti( func, 0, dptr)); CU_SAFE_CALL(hipParamSeti( func, 4, BLOCK_SIZE_X )); CU_SAFE_CALL(hipParamSetf( func, 8, 0.5f)); CU_SAFE_CALL(hipParamSeti( func, 12, flagG)); CU_SAFE_CALL(hipParamSeti( func, 16, dim_dx)); CU_SAFE_CALL(hipParamSeti( func, 20, dim_dy)); CU_SAFE_CALL(hipParamSetSize( func, 24 )); CU_SAFE_CALL(hipLaunchGrid( func, 1, 1)); //; dim3 block(8, 8, 1); //; dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); //; kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, anim); // unmap buffer object CU_SAFE_CALL(hipGLUnmapBufferObject(*vbo)); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// CUTBoolean initGL() { // initialize necessary OpenGL extensions glewInit(); if (! 
glewIsSupported( "GL_VERSION_2_0 " "GL_ARB_pixel_buffer_object" )) { fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush( stderr); return CUTFalse; } // default initialization glClearColor( 0.8, 0.8, 0.8, 1.0); glDisable( GL_DEPTH_TEST); // viewport glViewport( 0, 0, window_width, window_height); // projection glMatrixMode( GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); CUT_CHECK_ERROR_GL(); return CUTTrue; } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint* vbo) { unsigned int size = mesh_width * mesh_height * sizeof( float); // create buffer object glGenBuffers( 3, vbo); printf("1: %d\n", 1); glBindBuffer( GL_ARRAY_BUFFER, *vbo); glBufferData( GL_ARRAY_BUFFER, size, vertices, GL_DYNAMIC_DRAW); printf("2\n"); glBindBuffer( GL_ARRAY_BUFFER, *(vbo+1)); glBufferData( GL_ARRAY_BUFFER, size, normals, GL_DYNAMIC_DRAW); printf("3\n"); glBindBuffer( GL_ARRAY_BUFFER, *(vbo+2)); glBufferData( GL_ARRAY_BUFFER, size, colors, GL_DYNAMIC_DRAW); printf("4\n"); glBindBuffer( GL_ARRAY_BUFFER, 0); printf("5\n"); // register buffer object with CUDA CU_SAFE_CALL(hipGLInit()); CU_SAFE_CALL(hipGLRegisterBufferObject(*vbo)); printf("6\n"); CUT_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO( GLuint* vbo) { glBindBuffer( 1, *vbo); glDeleteBuffers( 1, vbo); CU_SAFE_CALL(hipGLUnregisterBufferObject(*vbo)); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // run CUDA kernel to generate vertex positions runCuda(vboId, kernel); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); glEnableClientState(GL_VERTEX_ARRAY); glEnableClientState(GL_NORMAL_ARRAY); glEnableClientState(GL_COLOR_ARRAY); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, *(vboId+1)); glNormalPointer(GL_FLOAT, 0, 0); glBindBuffer(GL_ARRAY_BUFFER, *(vboId+2)); glColorPointer(3, GL_FLOAT, 0, 0); glBindBuffer(GL_ARRAY_BUFFER, *vboId); glVertexPointer(3, GL_FLOAT, 0, 0); glDrawArrays(GL_QUADS, 0, 24); glDisableClientState(GL_VERTEX_ARRAY); glDisableClientState(GL_NORMAL_ARRAY); glDisableClientState(GL_COLOR_ARRAY); glutSwapBuffers(); glutPostRedisplay(); anim += 1.0; } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard( unsigned char key, int /*x*/, int /*y*/) { switch( key) { case( 27) : deleteVBO( vboId); exit( 0); } } //////////////////////////////////////////////////////////////////////////////// //! 
Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void motion(int x, int y) { float dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (mouse_buttons & 1) { rotate_x += dy * 0.2; rotate_y += dx * 0.2; } else if (mouse_buttons & 4) { translate_z += dy * 0.01; } mouse_old_x = x; mouse_old_y = y; }
97266010204d61767c4582046ad0ef447ae1b4fe.cu
#ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, GL #include <GL/glew.h> #include <GL/glut.h> // includes #include <cuda.h> #include <cutil.h> #include <cutil_gl_error.h> #include <cudaGL.h> //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 8; const unsigned int mesh_height = 9; // vbo variables GLuint *vboId; float anim = 0.0; // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; int flagG = 0; const float vertices [] = {1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, //0-1-2-3 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0,//4-7-6-5 -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0,//7-4-3-2 -1.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0,//1-6-7-2 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, //0-5-6-1 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, //0-3-4-5 }; const float normals [] = {0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,// v0-v1-v2-v3 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0,// v4-v7-v6-v5 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0,// v7-v4-v3-v2 -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0,// v1-v6-v7-v2 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,// v0-v5-v6-v1 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,// v0-v3-v4-v5 }; const float colors [] = {1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,// v0-v1-v2-v3 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, // v4-v7-v6-v5 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, // v7-v4-v3-v2 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, // v1-v6-v7-v2 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, // v0-v5-v6-v1 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, // v0-v3-v4-v5 }; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); // GL functionality CUTBoolean initGL(); void createVBO( GLuint* vbo); void deleteVBO( GLuint* vbo); // rendering callbacks void display(); void keyboard( unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); // Cuda functionality void runCuda( GLuint* vbo, CUfunction knl); CUfunction kernel; CUdevice cuDevice; CUcontext cuContext; CUmodule cuModule; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Init for CUDA //////////////////////////////////////////////////////////////////////////////// static CUresult initCuda(CUfunction *pKernel, int argc, char** argv) { CUfunction cuFunction = 0; CUT_DEVICE_INIT_DRV(cuDevice); CUresult status = cuCtxCreate( &cuContext, 0, cuDevice ); printf ("Ctx Creation: %d\n", status); if ( CUDA_SUCCESS != status ) { cuCtxDetach(cuContext); return status; } status = cuModuleLoad(&cuModule, "data/cube_kernel.cubin"); printf ("ModuleLoad: %d\n", status); // cutFree(module_path); if ( CUDA_SUCCESS != status ) { cuCtxDetach(cuContext); return status; } // Global function status = cuModuleGetFunction( &cuFunction, cuModule, "kernel" ); printf("function loading: %d\n", status); if ( CUDA_SUCCESS != status) { cuCtxDetach(cuContext); return status; } *pKernel = cuFunction; return CUDA_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // Create GL context glutInit( &argc, argv); glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize( window_width, window_height); glutCreateWindow( "Cuda GL interop"); // initialize GL if( CUTFalse == initGL()) { return; } // register callbacks glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); // Cuda Driver Init kernel = NULL; CU_SAFE_CALL(initCuda(&kernel, argc, argv)); // create VBO createVBO(vboId); printf ("INITIALIZE OF SIMPLEL\n"); // run the cuda part runCuda(vboId, kernel); // start rendering mainloop glutMainLoop(); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda( GLuint* vbo, CUfunction func) { if(flagG==0) flagG = 1; else flagG = 0; // map OpenGL buffer object for writing from CUDA CUdeviceptr dptr; unsigned int size; CU_SAFE_CALL(cuGLMapBufferObject( &dptr, &size, *vbo)); //printf("runCuda (vbo, anim) = %d, %f\n", *vbo, anim); // execute the func int BLOCK_SIZE_X = 8; int BLOCK_SIZE_Y = 9; CUdeviceptr dim_dx; CUdeviceptr dim_dy; cuMemAlloc(&dim_dx, sizeof(float)); cuMemAlloc(&dim_dy, sizeof(float)); // setup execution parameters CU_SAFE_CALL(cuFuncSetBlockShape( func, BLOCK_SIZE_X, BLOCK_SIZE_Y, 1 )); CU_SAFE_CALL(cuFuncSetSharedSize( func, 3*BLOCK_SIZE_X*BLOCK_SIZE_Y*sizeof(float) ) ); CU_SAFE_CALL(cuParamSeti( func, 0, dptr)); CU_SAFE_CALL(cuParamSeti( func, 4, BLOCK_SIZE_X )); CU_SAFE_CALL(cuParamSetf( func, 8, 0.5f)); CU_SAFE_CALL(cuParamSeti( func, 12, flagG)); CU_SAFE_CALL(cuParamSeti( func, 16, dim_dx)); CU_SAFE_CALL(cuParamSeti( func, 20, dim_dy)); CU_SAFE_CALL(cuParamSetSize( func, 24 )); CU_SAFE_CALL(cuLaunchGrid( func, 1, 1)); //; dim3 block(8, 8, 1); //; dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); //; kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, anim); // unmap buffer object CU_SAFE_CALL(cuGLUnmapBufferObject(*vbo)); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// CUTBoolean initGL() { // initialize necessary OpenGL extensions glewInit(); if (! 
glewIsSupported( "GL_VERSION_2_0 " "GL_ARB_pixel_buffer_object" )) { fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush( stderr); return CUTFalse; } // default initialization glClearColor( 0.8, 0.8, 0.8, 1.0); glDisable( GL_DEPTH_TEST); // viewport glViewport( 0, 0, window_width, window_height); // projection glMatrixMode( GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); CUT_CHECK_ERROR_GL(); return CUTTrue; } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint* vbo) { unsigned int size = mesh_width * mesh_height * sizeof( float); // create buffer object glGenBuffers( 3, vbo); printf("1: %d\n", 1); glBindBuffer( GL_ARRAY_BUFFER, *vbo); glBufferData( GL_ARRAY_BUFFER, size, vertices, GL_DYNAMIC_DRAW); printf("2\n"); glBindBuffer( GL_ARRAY_BUFFER, *(vbo+1)); glBufferData( GL_ARRAY_BUFFER, size, normals, GL_DYNAMIC_DRAW); printf("3\n"); glBindBuffer( GL_ARRAY_BUFFER, *(vbo+2)); glBufferData( GL_ARRAY_BUFFER, size, colors, GL_DYNAMIC_DRAW); printf("4\n"); glBindBuffer( GL_ARRAY_BUFFER, 0); printf("5\n"); // register buffer object with CUDA CU_SAFE_CALL(cuGLInit()); CU_SAFE_CALL(cuGLRegisterBufferObject(*vbo)); printf("6\n"); CUT_CHECK_ERROR_GL(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO( GLuint* vbo) { glBindBuffer( 1, *vbo); glDeleteBuffers( 1, vbo); CU_SAFE_CALL(cuGLUnregisterBufferObject(*vbo)); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // run CUDA kernel to generate vertex positions runCuda(vboId, kernel); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); glEnableClientState(GL_VERTEX_ARRAY); glEnableClientState(GL_NORMAL_ARRAY); glEnableClientState(GL_COLOR_ARRAY); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, *(vboId+1)); glNormalPointer(GL_FLOAT, 0, 0); glBindBuffer(GL_ARRAY_BUFFER, *(vboId+2)); glColorPointer(3, GL_FLOAT, 0, 0); glBindBuffer(GL_ARRAY_BUFFER, *vboId); glVertexPointer(3, GL_FLOAT, 0, 0); glDrawArrays(GL_QUADS, 0, 24); glDisableClientState(GL_VERTEX_ARRAY); glDisableClientState(GL_NORMAL_ARRAY); glDisableClientState(GL_COLOR_ARRAY); glutSwapBuffers(); glutPostRedisplay(); anim += 1.0; } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard( unsigned char key, int /*x*/, int /*y*/) { switch( key) { case( 27) : deleteVBO( vboId); exit( 0); } } //////////////////////////////////////////////////////////////////////////////// //! 
Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void motion(int x, int y) { float dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (mouse_buttons & 1) { rotate_x += dy * 0.2; rotate_y += dx * 0.2; } else if (mouse_buttons & 4) { translate_z += dy * 0.01; } mouse_old_x = x; mouse_old_y = y; }
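// A sketch of the same launch through the current driver-API entry point, since the
// cuFuncSetBlockShape / cuParamSet* / cuLaunchGrid sequence used in runCuda() above is
// long deprecated.  The kernel's parameter list (pointer, int, float, int, and the two
// scratch CUdeviceptr values) is only inferred from the cuParamSet* offsets, so treat
// the signature as an assumption, not the actual cube_kernel.cubin interface.
#include <cuda.h>

static CUresult launch_with_cuLaunchKernel(CUfunction func, CUdeviceptr dptr,
                                           int width, float t, int flag,
                                           CUdeviceptr dx, CUdeviceptr dy)
{
    // One slot per kernel parameter; the driver reads the argument values through these pointers.
    void *args[] = { &dptr, &width, &t, &flag, &dx, &dy };

    const unsigned int blockX = 8, blockY = 9;                  // same block shape as runCuda()
    const unsigned int sharedBytes = 3 * blockX * blockY * sizeof(float);

    return cuLaunchKernel(func,
                          1, 1, 1,                              // grid:  1 x 1 x 1 blocks
                          blockX, blockY, 1,                    // block: 8 x 9 x 1 threads
                          sharedBytes,                          // dynamic shared memory, as before
                          0,                                    // default stream
                          args, NULL);
}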
e9293b0effbd817e01d1a6558d7440df5bed7b35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <hipfft.h> #include <math.h> #include "helper.h" typedef float2 Complex; __global__ void ComplexMUL(Complex *A, Complex *B, Complex *C, float scale) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < scale){ hipfftComplex result; hipfftComplex mA = A[idx]; hipfftComplex mB = B[idx]; result.x = (mA.x * mB.x - mA.y * mB.y)/scale; result.y = (mA.x * mB.y + mA.y * mB.x)/scale; C[idx] = result;} } __global__ void copyVal(float*out_red,float*out_green,float*out_blue,hipfftComplex *in_red, hipfftComplex *in_green,hipfftComplex *in_blue,int w, int h, int W, int H ){ //__global__ void copyVal(float*out_red,hipfftComplex *in_red,int w, int h, int W, int H ){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < 1){ out_red[currentlocation]=in_red[W-w+ix+(H-h+iy)*W].x; out_blue[currentlocation]=in_blue[W-w+ix+(H-h+iy)*W].x; out_green[currentlocation]=in_green[W-w+ix+(H-h+iy)*W].x; } } __global__ void pad(float*out_red,float*out_green,float*out_blue,float*in_red, float*in_green, float*in_blue, int w, int h, int W,float* out_kernel, float* in_kernel,int wk,int hk){ //__global__ void pad(float*out_red,float*in_red, int w, int h, int W,float* out_kernel, float* in_kernel,int wk,int hk){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; //printf("Hi\n"); if (ix < w && iy <h && iz < 1){ out_red[ix + iy * W]=in_red[currentlocation]; out_green[ix + iy * W]=in_green[currentlocation]; out_blue[ix + iy * W]=in_blue[currentlocation]; } if(ix < wk && iy <hk && iz < 1) out_kernel[ix + iy * W]=in_kernel[iz*wk*hk + ix + iy * wk]; //TEST out_kernel[ix + iy * W]=1; } __global__ void ConvertComplex(float* in_red,float* in_green,float* in_blue,hipfftComplex *out_red,hipfftComplex *out_green,hipfftComplex *out_blue, int w, int h,float*in_kernel,hipfftComplex *out_kernel){ //__global__ void ConvertComplex(float* in_red,hipfftComplex *out_red, int w, int h,float*in_kernel,hipfftComplex *out_kernel){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < 1){ out_red[currentlocation].x=in_red[currentlocation]; out_blue[currentlocation].x=in_blue[currentlocation]; out_green[currentlocation].x=in_green[currentlocation]; out_kernel[currentlocation].x=in_kernel[currentlocation]; out_red[currentlocation].y=0; out_blue[currentlocation].y=0; out_green[currentlocation].y=0; out_kernel[currentlocation].y=0; } } cv::Mat conv_CUFFT(cv::Mat src, cv::Mat kernel){ int w=src.cols; int h=src.rows; int nc=src.channels(); int krows=kernel.rows; int kcols=kernel.cols; //Split the images to channel cv::Mat src_channels[3]; cv::split(src, src_channels); float *R_src_channels = new float[(size_t)w*h]; float *B_src_channels = new float[(size_t)w*h]; float *G_src_channels = new float[(size_t)w*h]; float *kernelArray = new float[(size_t)krows*kcols]; convert_mat_to_layered (R_src_channels, src); 
convert_mat_to_layered (R_src_channels, src_channels[0]); convert_mat_to_layered (B_src_channels, src_channels[1]); convert_mat_to_layered (G_src_channels, src_channels[2]); convert_mat_to_layered (kernelArray, kernel); //////////////////////////////////////////////////////////////////////////////////// float* pad_red,*pad_blue, *pad_green,*pad_kernel; int n=(h+2*(krows/2))*(w+2*(kcols/2)); int nk=krows*kcols; hipMalloc(&pad_red,n*sizeof(float)); hipMemset(pad_red,0, n*sizeof(float)); hipMalloc(&pad_blue,n*sizeof(float)); hipMemset(pad_blue,0, n*sizeof(float)); hipMalloc(&pad_green,n*sizeof(float)); hipMemset(pad_green,0, n*sizeof(float)); hipMalloc(&pad_kernel,n*sizeof(float)); hipMemset(pad_kernel,0, n*sizeof(float)); float* R_src,*G_src, *B_src, *kernel_src; hipMalloc(&R_src,h*w*sizeof(float)); hipMemcpy(R_src, R_src_channels,w*h*sizeof(float), hipMemcpyHostToDevice); hipMalloc(&G_src,h*w*sizeof(float)); hipMemcpy(G_src, G_src_channels,w*h*sizeof(float), hipMemcpyHostToDevice); hipMalloc(&B_src,h*w*sizeof(float)); hipMemcpy(B_src, B_src_channels,w*h*sizeof(float), hipMemcpyHostToDevice); hipMalloc(&kernel_src,krows*kcols*sizeof(float)); hipMemcpy(kernel_src, kernelArray,krows*kcols*sizeof(float), hipMemcpyHostToDevice); dim3 Block = dim3(32,32,1); dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (1+ Block.z -1) / Block.z); hipLaunchKernelGGL(( pad), dim3(Grid),dim3(Block) , 0, 0, pad_red,pad_green,pad_blue,R_src,G_src,B_src,w,h, w+2*(kcols/2),pad_kernel,kernel_src,kcols,krows); int W = w+2*(kcols/2); //pad<<< Grid,Block >>>(pad_red,R_src,w,h, W,pad_kernel,kernel_src,kcols,krows); //////////////////////////////////////////////////////////////////////////////// //Convert to Complex /////////////////////////////////////////////////////////////////////////////// hipfftComplex* pad_red_complex,*pad_blue_complex, *pad_green_complex,*pad_kernel_complex; hipMalloc(&pad_red_complex,n*sizeof(Complex)); hipMalloc(&pad_blue_complex,n*sizeof(hipfftComplex)); hipMalloc(&pad_green_complex,n*sizeof(hipfftComplex)); hipMalloc(&pad_kernel_complex,n*sizeof(Complex));hipLaunchKernelGGL(( ConvertComplex), dim3(Grid),dim3(Block), 0, 0, pad_red,pad_green,pad_blue,pad_red_complex,pad_green_complex,pad_blue_complex, w+2*(kcols/2),h+2*(krows/2),pad_kernel,pad_kernel_complex); //ConvertComplex<<<Grid,Block>>>(pad_red,pad_red_complex, w+2*(kcols/2),h+2*(krows/2),pad_kernel,pad_kernel_complex); // initialize CUFFT library hipfftHandle plan; hipfftPlan1d(&plan, n ,HIPFFT_C2C, 1); printf("Transforming signal hipfftExecR2C\n"); hipfftExecC2C(plan, (hipfftComplex *)pad_red_complex, (hipfftComplex *)pad_red_complex, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex *)pad_green_complex, (hipfftComplex *)pad_green_complex, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex *)pad_blue_complex, (hipfftComplex *)pad_blue_complex, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex *)pad_kernel_complex, (hipfftComplex *)pad_kernel_complex, HIPFFT_FORWARD); hipfftComplex *g_RedOut, *g_BlueOut, *g_GreenOut; hipMalloc(&g_RedOut, n*sizeof(Complex)); hipMalloc(&g_BlueOut, n*sizeof(hipfftComplex)); hipMalloc(&g_GreenOut,n*sizeof(hipfftComplex)); hipLaunchKernelGGL(( ComplexMUL), dim3(32),dim3(8), 0, 0, pad_kernel_complex, pad_red_complex, g_RedOut, n); hipLaunchKernelGGL(( ComplexMUL), dim3(32),dim3(8), 0, 0, pad_kernel_complex, pad_green_complex, g_GreenOut, n); hipLaunchKernelGGL(( ComplexMUL), dim3(32),dim3(8), 0, 0, pad_kernel_complex, pad_blue_complex, g_BlueOut, n); hipfftExecC2C(plan, (hipfftComplex 
*)g_RedOut, (hipfftComplex *)g_RedOut, HIPFFT_BACKWARD);
    hipfftExecC2C(plan, g_GreenOut, g_GreenOut, HIPFFT_BACKWARD);
    hipfftExecC2C(plan, g_BlueOut,  g_BlueOut,  HIPFFT_BACKWARD);

    // Crop the padded result back into the original image-sized buffers.
    hipLaunchKernelGGL(( copyVal), dim3(Grid), dim3(Block), 0, 0,
                       R_src, G_src, B_src, g_RedOut, g_GreenOut, g_BlueOut,
                       w, h, w + 2*(kcols/2), h + 2*(krows/2));
    // Single-channel debug variant (its kernel signature is commented out above); left
    // disabled because the full copyVal kernel takes ten arguments, not six:
    // hipLaunchKernelGGL(( copyVal), dim3(Grid), dim3(Block), 0, 0,
    //                    R_src, g_RedOut, w, h, w + 2*(kcols/2), h + 2*(krows/2));

    hipMemcpy(R_src_channels, R_src, w*h*sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(G_src_channels, G_src, w*h*sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy(B_src_channels, B_src, w*h*sizeof(float), hipMemcpyDeviceToHost);

    convert_layered_to_mat(src_channels[0], R_src_channels);
    convert_layered_to_mat(src_channels[1], B_src_channels);
    convert_layered_to_mat(src_channels[2], G_src_channels);

    cv::Mat ImgOut;
    cv::merge(src_channels, 3, ImgOut);
    // Debug leftover: src has 3 channels but R_src_channels holds only w*h floats,
    // so this read would run past the buffer:
    // convert_layered_to_mat(src, R_src_channels);

    hipfftDestroy(plan);
    return ImgOut;
}
e9293b0effbd817e01d1a6558d7440df5bed7b35.cu
#include <stdio.h> #include <stdlib.h> #include <cufft.h> #include <math.h> #include "helper.h" typedef float2 Complex; __global__ void ComplexMUL(Complex *A, Complex *B, Complex *C, float scale) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < scale){ cufftComplex result; cufftComplex mA = A[idx]; cufftComplex mB = B[idx]; result.x = (mA.x * mB.x - mA.y * mB.y)/scale; result.y = (mA.x * mB.y + mA.y * mB.x)/scale; C[idx] = result;} } __global__ void copyVal(float*out_red,float*out_green,float*out_blue,cufftComplex *in_red, cufftComplex *in_green,cufftComplex *in_blue,int w, int h, int W, int H ){ //__global__ void copyVal(float*out_red,cufftComplex *in_red,int w, int h, int W, int H ){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < 1){ out_red[currentlocation]=in_red[W-w+ix+(H-h+iy)*W].x; out_blue[currentlocation]=in_blue[W-w+ix+(H-h+iy)*W].x; out_green[currentlocation]=in_green[W-w+ix+(H-h+iy)*W].x; } } __global__ void pad(float*out_red,float*out_green,float*out_blue,float*in_red, float*in_green, float*in_blue, int w, int h, int W,float* out_kernel, float* in_kernel,int wk,int hk){ //__global__ void pad(float*out_red,float*in_red, int w, int h, int W,float* out_kernel, float* in_kernel,int wk,int hk){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; //printf("Hi\n"); if (ix < w && iy <h && iz < 1){ out_red[ix + iy * W]=in_red[currentlocation]; out_green[ix + iy * W]=in_green[currentlocation]; out_blue[ix + iy * W]=in_blue[currentlocation]; } if(ix < wk && iy <hk && iz < 1) out_kernel[ix + iy * W]=in_kernel[iz*wk*hk + ix + iy * wk]; //TEST out_kernel[ix + iy * W]=1; } __global__ void ConvertComplex(float* in_red,float* in_green,float* in_blue,cufftComplex *out_red,cufftComplex *out_green,cufftComplex *out_blue, int w, int h,float*in_kernel,cufftComplex *out_kernel){ //__global__ void ConvertComplex(float* in_red,cufftComplex *out_red, int w, int h,float*in_kernel,cufftComplex *out_kernel){ int ix = threadIdx.x + blockDim.x * blockIdx.x;//xaxis of imagein int iy = threadIdx.y + blockDim.y * blockIdx.y;//yaxis of imagein int iz = threadIdx.z + blockDim.z * blockIdx.z; //channels imagein int currentlocation = iz*w*h + ix + iy * w; if (ix < w && iy <h && iz < 1){ out_red[currentlocation].x=in_red[currentlocation]; out_blue[currentlocation].x=in_blue[currentlocation]; out_green[currentlocation].x=in_green[currentlocation]; out_kernel[currentlocation].x=in_kernel[currentlocation]; out_red[currentlocation].y=0; out_blue[currentlocation].y=0; out_green[currentlocation].y=0; out_kernel[currentlocation].y=0; } } cv::Mat conv_CUFFT(cv::Mat src, cv::Mat kernel){ int w=src.cols; int h=src.rows; int nc=src.channels(); int krows=kernel.rows; int kcols=kernel.cols; //Split the images to channel cv::Mat src_channels[3]; cv::split(src, src_channels); float *R_src_channels = new float[(size_t)w*h]; float *B_src_channels = new float[(size_t)w*h]; float *G_src_channels = new float[(size_t)w*h]; float *kernelArray = new float[(size_t)krows*kcols]; convert_mat_to_layered (R_src_channels, src); convert_mat_to_layered (R_src_channels, src_channels[0]); convert_mat_to_layered (B_src_channels, 
src_channels[1]); convert_mat_to_layered (G_src_channels, src_channels[2]); convert_mat_to_layered (kernelArray, kernel); //////////////////////////////////////////////////////////////////////////////////// float* pad_red,*pad_blue, *pad_green,*pad_kernel; int n=(h+2*(krows/2))*(w+2*(kcols/2)); int nk=krows*kcols; cudaMalloc(&pad_red,n*sizeof(float)); cudaMemset(pad_red,0, n*sizeof(float)); cudaMalloc(&pad_blue,n*sizeof(float)); cudaMemset(pad_blue,0, n*sizeof(float)); cudaMalloc(&pad_green,n*sizeof(float)); cudaMemset(pad_green,0, n*sizeof(float)); cudaMalloc(&pad_kernel,n*sizeof(float)); cudaMemset(pad_kernel,0, n*sizeof(float)); float* R_src,*G_src, *B_src, *kernel_src; cudaMalloc(&R_src,h*w*sizeof(float)); cudaMemcpy(R_src, R_src_channels,w*h*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&G_src,h*w*sizeof(float)); cudaMemcpy(G_src, G_src_channels,w*h*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&B_src,h*w*sizeof(float)); cudaMemcpy(B_src, B_src_channels,w*h*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&kernel_src,krows*kcols*sizeof(float)); cudaMemcpy(kernel_src, kernelArray,krows*kcols*sizeof(float), cudaMemcpyHostToDevice); dim3 Block = dim3(32,32,1); dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (1+ Block.z -1) / Block.z); pad<<< Grid,Block >>>(pad_red,pad_green,pad_blue,R_src,G_src,B_src,w,h, w+2*(kcols/2),pad_kernel,kernel_src,kcols,krows); int W = w+2*(kcols/2); //pad<<< Grid,Block >>>(pad_red,R_src,w,h, W,pad_kernel,kernel_src,kcols,krows); //////////////////////////////////////////////////////////////////////////////// //Convert to Complex /////////////////////////////////////////////////////////////////////////////// cufftComplex* pad_red_complex,*pad_blue_complex, *pad_green_complex,*pad_kernel_complex; cudaMalloc(&pad_red_complex,n*sizeof(Complex)); cudaMalloc(&pad_blue_complex,n*sizeof(cufftComplex)); cudaMalloc(&pad_green_complex,n*sizeof(cufftComplex)); cudaMalloc(&pad_kernel_complex,n*sizeof(Complex)); ConvertComplex<<<Grid,Block>>>(pad_red,pad_green,pad_blue,pad_red_complex,pad_green_complex,pad_blue_complex, w+2*(kcols/2),h+2*(krows/2),pad_kernel,pad_kernel_complex); //ConvertComplex<<<Grid,Block>>>(pad_red,pad_red_complex, w+2*(kcols/2),h+2*(krows/2),pad_kernel,pad_kernel_complex); // initialize CUFFT library cufftHandle plan; cufftPlan1d(&plan, n ,CUFFT_C2C, 1); printf("Transforming signal cufftExecR2C\n"); cufftExecC2C(plan, (cufftComplex *)pad_red_complex, (cufftComplex *)pad_red_complex, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex *)pad_green_complex, (cufftComplex *)pad_green_complex, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex *)pad_blue_complex, (cufftComplex *)pad_blue_complex, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex *)pad_kernel_complex, (cufftComplex *)pad_kernel_complex, CUFFT_FORWARD); cufftComplex *g_RedOut, *g_BlueOut, *g_GreenOut; cudaMalloc(&g_RedOut, n*sizeof(Complex)); cudaMalloc(&g_BlueOut, n*sizeof(cufftComplex)); cudaMalloc(&g_GreenOut,n*sizeof(cufftComplex)); ComplexMUL<<<32,8>>>(pad_kernel_complex, pad_red_complex, g_RedOut, n); ComplexMUL<<<32,8>>>(pad_kernel_complex, pad_green_complex, g_GreenOut, n); ComplexMUL<<<32,8>>>(pad_kernel_complex, pad_blue_complex, g_BlueOut, n); cufftExecC2C(plan, (cufftComplex *)g_RedOut,(cufftComplex *) g_RedOut, CUFFT_INVERSE); cufftExecC2C(plan, g_GreenOut, g_GreenOut, CUFFT_INVERSE); cufftExecC2C(plan, g_BlueOut, g_BlueOut, CUFFT_INVERSE); copyVal<<<Grid, Block>>>(R_src,G_src,B_src,g_RedOut, g_GreenOut, g_BlueOut,w,h, w+2*(kcols/2),h+2*(krows/2)); 
    // Single-channel debug variant (its kernel signature is commented out above); left
    // disabled because the full copyVal kernel takes ten arguments, not six:
    // copyVal<<<Grid, Block>>>(R_src, g_RedOut, w, h, w + 2*(kcols/2), h + 2*(krows/2));

    cudaMemcpy(R_src_channels, R_src, w*h*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(G_src_channels, G_src, w*h*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(B_src_channels, B_src, w*h*sizeof(float), cudaMemcpyDeviceToHost);

    convert_layered_to_mat(src_channels[0], R_src_channels);
    convert_layered_to_mat(src_channels[1], B_src_channels);
    convert_layered_to_mat(src_channels[2], G_src_channels);

    cv::Mat ImgOut;
    cv::merge(src_channels, 3, ImgOut);
    // Debug leftover: src has 3 channels but R_src_channels holds only w*h floats,
    // so this read would run past the buffer:
    // convert_layered_to_mat(src, R_src_channels);

    cufftDestroy(plan);
    return ImgOut;
}
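// The conv_CUFFT() routine above builds a rank-1 C2C plan of length n = W*H and transforms
// the padded image as one long vector, which is not the same thing as a 2-D DFT of the image.
// Below is a sketch of the 2-D plan that matches the padded row-major layout; the padded
// sizes come from the code above, but the wrapper function itself is only an illustration.
void forward_fft_2d(cufftComplex *d_img, int W /* padded width */, int H /* padded height */)
{
    cufftHandle plan2d;
    // cufftPlan2d takes the slowest-varying dimension first (rows), then columns.
    cufftPlan2d(&plan2d, H, W, CUFFT_C2C);
    cufftExecC2C(plan2d, d_img, d_img, CUFFT_FORWARD);   // in-place forward transform
    cufftDestroy(plan2d);
}
// The pointwise ComplexMUL product and the CUFFT_INVERSE pass stay as they are, with the
// 1/(W*H) scaling already handled by the kernel's 'scale' argument.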
ccd88aec5dce2af8a25f6662a2ed9c2896f9daf9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Each element is replaced by the sum of itself and its two circular neighbours.
__global__ void __add__(int *array, int *size)
{
    int n   = *size;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    int temp = 0;
    int next = (idx + 1) % n;        // right neighbour, wrapping to 0 at the end
    int prev = idx - 1;              // left neighbour, wrapping to n-1 at the start
    if (prev < 0) prev = n - 1;

    if (idx < n) {                   // the original `idx > *size` guard let idx == n read out of bounds
        temp += array[idx];
        temp += array[next];
        temp += array[prev];
    }

    __syncthreads();                 // barrier: all reads in this block complete before any write
                                     // (blocks still race with each other; single-block launches are exact)
    if (idx < n)
        array[idx] = temp;
}
ccd88aec5dce2af8a25f6662a2ed9c2896f9daf9.cu
#include "includes.h" __global__ void __add__(int *array, int *size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > *size) return; int temp = 0; int before = (idx + 1) % *size; int after = idx - 1; if (after < 0) after = *size - 1; temp += array[idx]; temp += array[before]; temp += array[after]; __syncthreads(); // Barrera... array[idx] = temp; }
8a5115cd0e225874212fe658120c68113d604603.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
8a5115cd0e225874212fe658120c68113d604603.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
61e5dd53504e9cbf2543924a0216e5477eaf1d3c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

#define BLOCK_SIZE 512

__global__ void reduction(float *out, float *in, unsigned size)
{
    /********************************************************************
    Load a segment of the input vector into shared memory
    Traverse the reduction tree
    Write the computed sum to the output vector at the correct index
    ********************************************************************/

    // INSERT KERNEL CODE HERE

    // Each block reduces 2*BLOCK_SIZE consecutive input elements into out[blockIdx.x].
    __shared__ float partialsum[BLOCK_SIZE * 2];

    unsigned int t     = threadIdx.x;
    unsigned int start = blockIdx.x * blockDim.x * 2;

    // Load two elements per thread, padding with 0 past the end of the input.
    partialsum[t]              = (start + t              < size) ? in[start + t]              : 0.0f;
    partialsum[blockDim.x + t] = (start + blockDim.x + t < size) ? in[start + blockDim.x + t] : 0.0f;

    // Tree reduction over the shared buffer.
    for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
        __syncthreads();
        if (t % stride == 0)
            partialsum[2 * t] += partialsum[2 * t + stride];
    }

    // Only one thread per block writes the block's partial sum.
    if (t == 0)
        out[blockIdx.x] = partialsum[0];
}
61e5dd53504e9cbf2543924a0216e5477eaf1d3c.cu
/******************************************************************************
*cr
*cr            (C) Copyright 2010 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

#define BLOCK_SIZE 512

__global__ void reduction(float *out, float *in, unsigned size)
{
    /********************************************************************
    Load a segment of the input vector into shared memory
    Traverse the reduction tree
    Write the computed sum to the output vector at the correct index
    ********************************************************************/

    // INSERT KERNEL CODE HERE

    // Each block reduces 2*BLOCK_SIZE consecutive input elements into out[blockIdx.x].
    __shared__ float partialsum[BLOCK_SIZE * 2];

    unsigned int t     = threadIdx.x;
    unsigned int start = blockIdx.x * blockDim.x * 2;

    // Load two elements per thread, padding with 0 past the end of the input.
    partialsum[t]              = (start + t              < size) ? in[start + t]              : 0.0f;
    partialsum[blockDim.x + t] = (start + blockDim.x + t < size) ? in[start + blockDim.x + t] : 0.0f;

    // Tree reduction over the shared buffer.
    for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
        __syncthreads();
        if (t % stride == 0)
            partialsum[2 * t] += partialsum[2 * t + stride];
    }

    // Only one thread per block writes the block's partial sum.
    if (t == 0)
        out[blockIdx.x] = partialsum[0];
}
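// The reduction kernel above leaves one partial sum per block, so computing a full sum
// takes repeated passes until a single value remains.  A sketch of that host-side loop
// follows; the buffer names and the ping-pong scheme are illustrative assumptions, not
// part of the lab code.  Note that d_in is reused as scratch after the first pass.
__global__ void reduction(float *out, float *in, unsigned size);   // defined above

float sum_on_device(float *d_in, float *d_tmp, unsigned size)
{
    // d_tmp must hold at least ceil(size / (2*BLOCK_SIZE)) floats.
    float *in  = d_in;
    float *out = d_tmp;
    unsigned n = size;

    while (n > 1) {
        unsigned blocks = (n + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
        reduction<<<blocks, BLOCK_SIZE>>>(out, in, n);
        // The partial sums become the input of the next pass.
        float *swap = in; in = out; out = swap;
        n = blocks;
    }

    float result;
    cudaMemcpy(&result, in, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}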
316757e6d256457ef0b4fb1d7c74d4d5318f3d15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //****************************************** // operators // based on min-app code written by Oliver Fuhrer, MeteoSwiss // modified by Ben Cumming, CSCS // // implements // ***************************************** // Description: Contains simple operators which can be used on 3d-meshes #include "cuda_helpers.h" #include "data.h" #include "operators.h" #include "stats.h" namespace operators { // POD type holding information for device struct DiffusionParams { int nx; int ny; double alpha; double dxs; double *x_old; double *bndN; double *bndE; double *bndS; double *bndW; }; // TODO : explain what the params variable and setup_params_on_device() do __device__ DiffusionParams params; void setup_params_on_device(int nx, int ny, double alpha, double dxs) { auto p = DiffusionParams { nx, ny, alpha, dxs, data::x_old.device_data(), data::bndN.device_data(), data::bndE.device_data(), data::bndS.device_data(), data::bndW.device_data() }; cuda_check_status( hipMemcpyToSymbol(params, &p, sizeof(DiffusionParams)) ); } namespace kernels { __global__ void stencil_interior(double* S, const double *U) { // TODO : implement the interior stencil // EXTRA : can you make it use shared memory? // S(i,j) = -(4. + alpha) * U(i,j) // central point // + U(i-1,j) + U(i+1,j) // east and west // + U(i,j-1) + U(i,j+1) // north and south // + alpha * x_old(i,j) // + dxs * U(i,j) * (1.0 - U(i,j)); auto j = threadIdx.y + blockDim.y*blockIdx.y+1; auto i = threadIdx.x + blockDim.x*blockIdx.x+1; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; if(i < nx-1 && j < ny-1){ auto pos = find_pos(i,j); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + U[pos+1] // east and west + U[pos-nx] + U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_east_west(double* S, const double *U) { auto j = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; if(j>0 && j<ny-1) { // EAST : i = nx-1 auto pos = find_pos(nx-1, j); S[pos] = -(4. + alpha) * U[pos] + U[pos-1] + U[pos-nx] + U[pos+nx] + alpha*params.x_old[pos] + params.bndE[j] + dxs * U[pos] * (1.0 - U[pos]); // TODO : do the stencil on the WEST side // WEST : i = 0 pos = find_pos(0, j); S[pos] = -(4. + alpha) * U[pos] + U[pos+1] + U[pos-nx] + U[pos+nx] + alpha*params.x_old[pos] + params.bndW[j] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_north_south(double* S, const double *U) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; if(i>0 && i<nx-1) { // NORTH : j = ny -1 auto pos = i + nx*(ny-1); S[pos] = -(4. + alpha) * U[pos] + U[pos-1] + U[pos+1] + U[pos-nx] + alpha*params.x_old[pos] + params.bndN[i] + dxs * U[pos] * (1.0 - U[pos]); // TODO : do the stencil on the SOUTH side // SOUTH : j = 0 pos = i; S[pos] = -(4. 
+ alpha) * U[pos] + U[pos-1] + U[pos+1] + U[pos+nx] + alpha*params.x_old[pos] + params.bndS[i] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_corners(double* S, const double* U) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; // only 1 thread executes this kernel if(i==0) { // NORTH-EAST auto pos = find_pos(nx-1, ny-1); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + params.bndE[ny-1] // east and west + U[pos-nx] + params.bndN[nx-1] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // SOUTH-EAST pos = find_pos(nx-1, 0); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + params.bndE[0] // east and west + params.bndS[nx-1]+ U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // SOUTH-WEST pos = find_pos(0, 0); S[pos] = -(4. + alpha) * U[pos] // central point + params.bndW[0] + U[pos+1] // east and west + params.bndS[0] + U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // NORTH-WEST pos = find_pos(0, ny-1); S[pos] = -(4. + alpha) * U[pos] // central point + params.bndW[nx-1]+ U[pos+1] // east and west + U[pos-nx] + params.bndN[0] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); } } } // namespace kernels //enum class Boundary {north, east, south, west}; void diffusion(data::Field const& U, data::Field &S) { using data::options; using data::bndE; using data::bndW; using data::bndN; using data::bndS; using data::x_old; double dxs = 1000. * (options.dx * options.dx); double alpha = options.alpha; int nx = options.nx; int ny = options.ny; // calculates the linear index into an array of width nx // from an (i,j) coordinate pair auto idx = [&nx] (size_t i, size_t j) { return i + j * nx; }; static bool is_initialized = false; if(!is_initialized) { setup_params_on_device(nx, ny, alpha, dxs); is_initialized = true; } // apply stencil to the interior grid points // TODO: what is the purpose of the following? 
auto calculate_grid_dim = [] (size_t n, size_t block_dim) { return (n+block_dim-1)/block_dim; }; // TODO: apply stencil to the interior grid points auto bnd_grid_dim_xx = calculate_grid_dim(nx-2 ,8); auto bnd_grid_dim_yy = calculate_grid_dim(ny-2 ,8); dim3 grid_dim(bnd_grid_dim_xx,bnd_grid_dim_yy); dim3 block_dim(8,8); hipLaunchKernelGGL(( kernels::stencil_interior), dim3(grid_dim),dim3(block_dim), 0, 0, S.device_data(),U.device_data()); hipDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("internal kernel"); // TODO: remove after debugging // apply stencil at east-west boundary auto bnd_grid_dim_y = calculate_grid_dim(ny, 64); hipLaunchKernelGGL(( kernels::stencil_east_west), dim3(bnd_grid_dim_y), dim3(64), 0, 0, S.device_data(), U.device_data()); hipDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging // apply stencil at north-south boundary auto bnd_grid_dim_x = calculate_grid_dim(nx, 64); hipLaunchKernelGGL(( kernels::stencil_north_south), dim3(bnd_grid_dim_x), dim3(64), 0, 0, S.device_data(), U.device_data()); hipDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging // apply stencil at corners hipLaunchKernelGGL(( kernels::stencil_corners), dim3(1), dim3(1), 0, 0, S.device_data(), U.device_data()); hipDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging } } // namespace operators
316757e6d256457ef0b4fb1d7c74d4d5318f3d15.cu
//****************************************** // operators // based on min-app code written by Oliver Fuhrer, MeteoSwiss // modified by Ben Cumming, CSCS // // implements // ***************************************** // Description: Contains simple operators which can be used on 3d-meshes #include "cuda_helpers.h" #include "data.h" #include "operators.h" #include "stats.h" namespace operators { // POD type holding information for device struct DiffusionParams { int nx; int ny; double alpha; double dxs; double *x_old; double *bndN; double *bndE; double *bndS; double *bndW; }; // TODO : explain what the params variable and setup_params_on_device() do __device__ DiffusionParams params; void setup_params_on_device(int nx, int ny, double alpha, double dxs) { auto p = DiffusionParams { nx, ny, alpha, dxs, data::x_old.device_data(), data::bndN.device_data(), data::bndE.device_data(), data::bndS.device_data(), data::bndW.device_data() }; cuda_check_status( cudaMemcpyToSymbol(params, &p, sizeof(DiffusionParams)) ); } namespace kernels { __global__ void stencil_interior(double* S, const double *U) { // TODO : implement the interior stencil // EXTRA : can you make it use shared memory? // S(i,j) = -(4. + alpha) * U(i,j) // central point // + U(i-1,j) + U(i+1,j) // east and west // + U(i,j-1) + U(i,j+1) // north and south // + alpha * x_old(i,j) // + dxs * U(i,j) * (1.0 - U(i,j)); auto j = threadIdx.y + blockDim.y*blockIdx.y+1; auto i = threadIdx.x + blockDim.x*blockIdx.x+1; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; if(i < nx-1 && j < ny-1){ auto pos = find_pos(i,j); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + U[pos+1] // east and west + U[pos-nx] + U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_east_west(double* S, const double *U) { auto j = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; if(j>0 && j<ny-1) { // EAST : i = nx-1 auto pos = find_pos(nx-1, j); S[pos] = -(4. + alpha) * U[pos] + U[pos-1] + U[pos-nx] + U[pos+nx] + alpha*params.x_old[pos] + params.bndE[j] + dxs * U[pos] * (1.0 - U[pos]); // TODO : do the stencil on the WEST side // WEST : i = 0 pos = find_pos(0, j); S[pos] = -(4. + alpha) * U[pos] + U[pos+1] + U[pos-nx] + U[pos+nx] + alpha*params.x_old[pos] + params.bndW[j] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_north_south(double* S, const double *U) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; if(i>0 && i<nx-1) { // NORTH : j = ny -1 auto pos = i + nx*(ny-1); S[pos] = -(4. + alpha) * U[pos] + U[pos-1] + U[pos+1] + U[pos-nx] + alpha*params.x_old[pos] + params.bndN[i] + dxs * U[pos] * (1.0 - U[pos]); // TODO : do the stencil on the SOUTH side // SOUTH : j = 0 pos = i; S[pos] = -(4. 
+ alpha) * U[pos] + U[pos-1] + U[pos+1] + U[pos+nx] + alpha*params.x_old[pos] + params.bndS[i] + dxs * U[pos] * (1.0 - U[pos]); } } __global__ void stencil_corners(double* S, const double* U) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto nx = params.nx; auto ny = params.ny; auto alpha = params.alpha; auto dxs = params.dxs; auto find_pos = [&nx] (size_t i, size_t j) { return i + j * nx; }; // only 1 thread executes this kernel if(i==0) { // NORTH-EAST auto pos = find_pos(nx-1, ny-1); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + params.bndE[ny-1] // east and west + U[pos-nx] + params.bndN[nx-1] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // SOUTH-EAST pos = find_pos(nx-1, 0); S[pos] = -(4. + alpha) * U[pos] // central point + U[pos-1] + params.bndE[0] // east and west + params.bndS[nx-1]+ U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // SOUTH-WEST pos = find_pos(0, 0); S[pos] = -(4. + alpha) * U[pos] // central point + params.bndW[0] + U[pos+1] // east and west + params.bndS[0] + U[pos+nx] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); // NORTH-WEST pos = find_pos(0, ny-1); S[pos] = -(4. + alpha) * U[pos] // central point + params.bndW[nx-1]+ U[pos+1] // east and west + U[pos-nx] + params.bndN[0] // north and south + alpha * params.x_old[pos] + dxs * U[pos] * (1.0 - U[pos]); } } } // namespace kernels //enum class Boundary {north, east, south, west}; void diffusion(data::Field const& U, data::Field &S) { using data::options; using data::bndE; using data::bndW; using data::bndN; using data::bndS; using data::x_old; double dxs = 1000. * (options.dx * options.dx); double alpha = options.alpha; int nx = options.nx; int ny = options.ny; // calculates the linear index into an array of width nx // from an (i,j) coordinate pair auto idx = [&nx] (size_t i, size_t j) { return i + j * nx; }; static bool is_initialized = false; if(!is_initialized) { setup_params_on_device(nx, ny, alpha, dxs); is_initialized = true; } // apply stencil to the interior grid points // TODO: what is the purpose of the following? 
auto calculate_grid_dim = [] (size_t n, size_t block_dim) { return (n+block_dim-1)/block_dim; }; // TODO: apply stencil to the interior grid points auto bnd_grid_dim_xx = calculate_grid_dim(nx-2 ,8); auto bnd_grid_dim_yy = calculate_grid_dim(ny-2 ,8); dim3 grid_dim(bnd_grid_dim_xx,bnd_grid_dim_yy); dim3 block_dim(8,8); kernels::stencil_interior<<<grid_dim,block_dim>>>(S.device_data(),U.device_data()); cudaDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("internal kernel"); // TODO: remove after debugging // apply stencil at east-west boundary auto bnd_grid_dim_y = calculate_grid_dim(ny, 64); kernels::stencil_east_west<<<bnd_grid_dim_y, 64>>>(S.device_data(), U.device_data()); cudaDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging // apply stencil at north-south boundary auto bnd_grid_dim_x = calculate_grid_dim(nx, 64); kernels::stencil_north_south<<<bnd_grid_dim_x, 64>>>(S.device_data(), U.device_data()); cudaDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging // apply stencil at corners kernels::stencil_corners<<<1, 1>>>(S.device_data(), U.device_data()); cudaDeviceSynchronize(); // TODO: remove after debugging cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging } } // namespace operators
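// The interior kernel above carries the note "EXTRA : can you make it use shared memory?".
// One possible answer is sketched below: a tiled variant that stages a (BSIZE+2) x (BSIZE+2)
// patch of U in shared memory before applying the same 5-point update.  It assumes it sits
// next to the kernels above (same namespace, so `params` is visible) and is launched with
// the same 8x8 block shape; it is a sketch, not the course's reference solution.
#define BSIZE 8

__global__ void stencil_interior_shared(double *S, const double *U)
{
    __shared__ double tile[BSIZE + 2][BSIZE + 2];

    const int nx = params.nx;
    const int ny = params.ny;
    const int i  = threadIdx.x + blockDim.x * blockIdx.x + 1;   // global interior coordinates
    const int j  = threadIdx.y + blockDim.y * blockIdx.y + 1;
    const int li = threadIdx.x + 1;                             // local coordinates inside the tile
    const int lj = threadIdx.y + 1;
    const int pos = i + j * nx;
    const bool active = (i < nx - 1) && (j < ny - 1);           // true only for interior grid points

    // Stage the centre value plus any halo cell no other active thread will provide.
    if (active) {
        tile[lj][li] = U[pos];
        if (threadIdx.x == 0)                               tile[lj][li - 1] = U[pos - 1];
        if (threadIdx.x == blockDim.x - 1 || i == nx - 2)   tile[lj][li + 1] = U[pos + 1];
        if (threadIdx.y == 0)                               tile[lj - 1][li] = U[pos - nx];
        if (threadIdx.y == blockDim.y - 1 || j == ny - 2)   tile[lj + 1][li] = U[pos + nx];
    }
    __syncthreads();                                            // every thread reaches the barrier

    if (active) {
        S[pos] = -(4. + params.alpha) * tile[lj][li]            // central point
               + tile[lj][li - 1] + tile[lj][li + 1]            // east and west
               + tile[lj - 1][li] + tile[lj + 1][li]            // north and south
               + params.alpha * params.x_old[pos]
               + params.dxs * tile[lj][li] * (1.0 - tile[lj][li]);
    }
}
// Launched exactly like the global-memory version, e.g.
//   stencil_interior_shared<<<grid_dim, block_dim>>>(S.device_data(), U.device_data());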
ccab1996377f85b3983337b753387d863737ca2f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction); } Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_input(at::squeeze(input)) .add_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = ::log(input_val); scalar_t log_1_minus_input_val = ::log(one - input_val); log_input_val = ::max(log_input_val, neg_100); log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { 
loss_reduced = loss.sum(); } loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction); } Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } }} // namespace at::native
ccab1996377f85b3983337b753387d863737ca2f.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> constexpr float EPSILON = 1e-12; namespace { using namespace at; void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { at::TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(grad) .add_input(input) .add_input(target) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { at::native::gpu_kernel(iter, [] GPU_LAMBDA ( scalar_t grad_val, scalar_t input_val, scalar_t target_val ) -> scalar_t { const scalar_t one = 1; const scalar_t epsilon = EPSILON; scalar_t grad_input_denominator = max( (one - input_val) * input_val, epsilon ); return grad_val * (input_val - target_val) / grad_input_denominator; } ); }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { auto grad_input = at::empty_like(input); if (!log_target) { TensorIterator iter = TensorIteratorConfig() .add_output(grad_input) .add_input(target) .add_input(grad) .build(); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); gpu_kernel(iter, [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); }); }); } else { grad_input = -at::exp(target) * grad; if (reduction == at::Reduction::Mean) { grad_input /= input.numel(); } } return grad_input; } Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); Tensor loss = at::empty_like(input); return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction); } Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor loss_squeezed = at::squeeze(loss); TensorIterator iter = TensorIteratorConfig() .add_output(loss_squeezed) .add_input(at::squeeze(input)) .add_input(at::squeeze(target)) .build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { const scalar_t zero = 0; const scalar_t one = 1; const scalar_t neg_100 = -100; CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); scalar_t log_input_val = std::log(input_val); scalar_t log_1_minus_input_val = std::log(one - input_val); log_input_val = std::max(log_input_val, neg_100); log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100); return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); } ); }); if (weight.defined()) { loss.mul_(weight); } if (reduction != at::Reduction::None) { Tensor loss_reduced; if (reduction == at::Reduction::Mean) { loss_reduced = loss.mean(); } else if (reduction == at::Reduction::Sum) { loss_reduced = loss.sum(); } 
loss.resize_as_(loss_reduced).copy_(loss_reduced); } return loss; } Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); Tensor grad_input = at::empty_like(input); return at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction); } Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) { Tensor grad_expand = grad.expand_as(input); binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); if (weight.defined()) { grad_input.mul_(weight); } if (reduction == at::Reduction::Mean) { grad_input.div_(input.numel()); } return grad_input; } }} // namespace at::native
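// A small caller-side sketch of the code path these kernels serve: at::binary_cross_entropy
// dispatches to binary_cross_entropy_cuda above when its inputs live on a CUDA device.
// The snippet assumes a program built against libtorch/ATen with CUDA available; it is an
// illustration, not part of PyTorch itself.
#include <ATen/ATen.h>
#include <iostream>

int main()
{
    // BCE expects probabilities in [0, 1]; at::rand already satisfies the kernel's
    // CUDA_KERNEL_ASSERT(input_val >= 0 && input_val <= 1).
    at::Tensor input  = at::rand({4, 3}, at::kCUDA);
    at::Tensor target = at::rand({4, 3}, at::kCUDA);

    // Mean-reduced loss; this ends up in binary_cross_entropy_cuda / _out_cuda above.
    at::Tensor loss = at::binary_cross_entropy(input, target,
                                               /*weight=*/{}, at::Reduction::Mean);
    std::cout << loss.item<float>() << std::endl;
    return 0;
}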
0d07215bd4f6df99401b634a48eb63fdc65e7d35.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation         *
* using second order split step Fourier method.                                   *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015.            *
**********************************************************************************/
#include "../lib/cu_helpers.h"
#include <hipfft.h>

// Grid Parameters
#define XN  nodes           // Number of Fourier modes
#define TN  100             // Number of temporal nodes
#define LX  10.0            // x-spatial domain [-LX,LX)
#define TT  10.0            // Max time
#define DX  (2*LX / XN)     // x-spatial step size
#define DT  (TT / TN)       // temporal step size

// Timing parameters
#define IRVL 100            // Timing interval. Take a reading every N iterations.

// Output files
#define PLOT_F "gpu_ffts_plot.m"
#define TIME_F argv[2]

// Function prototypes
__global__ void nonlin(hipfftComplex *psi, float dt, int xn);
__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn);
__global__ void normalize(hipfftComplex *psi, int size);

int main(int argc, char **argv)
{
    // Timing info
    hipEvent_t begin_event, end_event;
    hipEventCreate(&begin_event);
    hipEventCreate(&end_event);

    // Print basic info about simulation
    const int nodes = atoi(argv[1]);
    printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));

    // Allocate host arrays
    float *h_x = (float*)malloc(sizeof(float) * XN);
    float *h_k2 = (float*)malloc(sizeof(float) * XN);
    float *h_kx = (float*)malloc(XN * sizeof(float));
    hipfftComplex *h_psi = (hipfftComplex*)malloc(sizeof(hipfftComplex)*XN);
    hipfftComplex *h_psi_0 = (hipfftComplex*)malloc(sizeof(hipfftComplex)*XN);

    // Create transform plans
    hipfftHandle plan;
    CUFFT_SAFE_CALL(hipfftPlan1d(&plan, XN, HIPFFT_C2C, 1));

    // Create wave number
    float dkx = 2*M_PI/XN/DX;
    for(int i = XN/2; i >= 0; i--)
        h_kx[XN/2 - i]=(XN/2 - i) * dkx;
    for(int i = XN/2+1; i < XN; i++)
        h_kx[i]=(i - XN) * dkx;

    // Initial Conditions on host
    for(int i = 0; i < XN; i++)
    {
        h_x[i] = (i-XN/2)*DX;
        h_psi[i].x = sqrt(2)/cosh(h_x[i]);
        //h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0));
        h_psi[i].y = 0;
        h_psi_0[i].x = h_psi[i].x;
        h_psi_0[i].y = h_psi[i].y;
        h_k2[i] = h_kx[i]*h_kx[i];
    }

    // Allocate device arrays and copy from host
    hipfftComplex *d_psi; float *d_k2;
    hipMalloc((void **)&d_psi, sizeof(hipfftComplex)*XN);
    hipMalloc((void **)&d_k2, sizeof(float)*XN);
    hipMemcpy(d_psi, h_psi, sizeof(hipfftComplex)*XN, hipMemcpyHostToDevice);
    hipMemcpy(d_k2, h_k2, sizeof(float)*XN, hipMemcpyHostToDevice);

    // Initialize the grid
    dim3 threadsPerBlock(128,1,1);
    dim3 blocksPerGrid((XN + 127)/128,1,1);

    // Forward transform
    CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));

    // Timing starts here
    hipEventRecord(begin_event, 0);

    // Start time evolution
    for (int i = 1; i <= TN; i++)
    {
        // Solve linear part
        hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN);
        // Backward transform
        CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
        // Normalize the transform
        hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN);
        // Solve nonlinear part
        hipLaunchKernelGGL(( nonlin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, DT, XN);
        // Forward transform
        CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));
        // Solve linear part
        hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN);
    }
    float time_value;
    hipEventRecord(end_event, 0);
    hipEventSynchronize(end_event);
    hipEventElapsedTime(&time_value, begin_event, end_event);

    // Print time to file
    FILE *fp = fopen(TIME_F, "a");
    fprintf(fp, "%f, ", time_value);
    fclose(fp);

    // Backward transform to retrieve data
    CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
    // Normalize the transform
    hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN);

    // Copy results to host
    hipMemcpy(h_psi, d_psi, sizeof(hipfftComplex)*XN, hipMemcpyDeviceToHost);

    // Plot results
    cm_plot_1df(h_psi_0, h_psi, LX, XN, PLOT_F);

    // Wrap up
    hipfftDestroy(plan);
    free(h_x);
    free(h_k2);
    free(h_kx);
    free(h_psi_0);
    free(h_psi);
    hipFree(d_psi);
    hipFree(d_k2);

    return 0;
}

__global__ void nonlin(hipfftComplex *psi, float dt, int xn)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Avoid first and last point (boundary conditions) (needs fixing)
    //if (i >= xn - 1 || i == 0) return;
    if (i >= xn) return;

    float psi2 = cuCabsf(psi[i])*cuCabsf(psi[i]);
    psi[i] = cuCmulf(psi[i], make_cuComplex(cos(psi2*dt), sin(psi2*dt)));
}

__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Avoid first and last point (boundary conditions) (needs fixing)
    //if (i >= xn - 1 || i == 0) return;
    if (i >= xn) return;

    psi[i] = cuCmulf(psi[i], make_cuComplex(cos(k2[i]*dt), -sin(k2[i]*dt)));
}

__global__ void normalize(hipfftComplex *psi, int size)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Stay within range since grid might be larger
    if (i >= size) return;

    psi[i].x = psi[i].x/size;
    psi[i].y = psi[i].y/size;
}
0d07215bd4f6df99401b634a48eb63fdc65e7d35.cu
/**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation         *
* using second order split step Fourier method.                                   *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015.            *
**********************************************************************************/
#include "../lib/cu_helpers.h"
#include <cufft.h>

// Grid Parameters
#define XN  nodes           // Number of Fourier modes
#define TN  100             // Number of temporal nodes
#define LX  10.0            // x-spatial domain [-LX,LX)
#define TT  10.0            // Max time
#define DX  (2*LX / XN)     // x-spatial step size
#define DT  (TT / TN)       // temporal step size

// Timing parameters
#define IRVL 100            // Timing interval. Take a reading every N iterations.

// Output files
#define PLOT_F "gpu_ffts_plot.m"
#define TIME_F argv[2]

// Function prototypes
__global__ void nonlin(cufftComplex *psi, float dt, int xn);
__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn);
__global__ void normalize(cufftComplex *psi, int size);

int main(int argc, char **argv)
{
    // Timing info
    cudaEvent_t begin_event, end_event;
    cudaEventCreate(&begin_event);
    cudaEventCreate(&end_event);

    // Print basic info about simulation
    const int nodes = atoi(argv[1]);
    printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));

    // Allocate host arrays
    float *h_x = (float*)malloc(sizeof(float) * XN);
    float *h_k2 = (float*)malloc(sizeof(float) * XN);
    float *h_kx = (float*)malloc(XN * sizeof(float));
    cufftComplex *h_psi = (cufftComplex*)malloc(sizeof(cufftComplex)*XN);
    cufftComplex *h_psi_0 = (cufftComplex*)malloc(sizeof(cufftComplex)*XN);

    // Create transform plans
    cufftHandle plan;
    CUFFT_SAFE_CALL(cufftPlan1d(&plan, XN, CUFFT_C2C, 1));

    // Create wave number
    float dkx = 2*M_PI/XN/DX;
    for(int i = XN/2; i >= 0; i--)
        h_kx[XN/2 - i]=(XN/2 - i) * dkx;
    for(int i = XN/2+1; i < XN; i++)
        h_kx[i]=(i - XN) * dkx;

    // Initial Conditions on host
    for(int i = 0; i < XN; i++)
    {
        h_x[i] = (i-XN/2)*DX;
        h_psi[i].x = sqrt(2)/cosh(h_x[i]);
        //h_psi[i].x = 2*exp(-(x[i]*x[i]/2.0/2.0));
        h_psi[i].y = 0;
        h_psi_0[i].x = h_psi[i].x;
        h_psi_0[i].y = h_psi[i].y;
        h_k2[i] = h_kx[i]*h_kx[i];
    }

    // Allocate device arrays and copy from host
    cufftComplex *d_psi; float *d_k2;
    cudaMalloc((void **)&d_psi, sizeof(cufftComplex)*XN);
    cudaMalloc((void **)&d_k2, sizeof(float)*XN);
    cudaMemcpy(d_psi, h_psi, sizeof(cufftComplex)*XN, cudaMemcpyHostToDevice);
    cudaMemcpy(d_k2, h_k2, sizeof(float)*XN, cudaMemcpyHostToDevice);

    // Initialize the grid
    dim3 threadsPerBlock(128,1,1);
    dim3 blocksPerGrid((XN + 127)/128,1,1);

    // Forward transform
    CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));

    // Timing starts here
    cudaEventRecord(begin_event, 0);

    // Start time evolution
    for (int i = 1; i <= TN; i++)
    {
        // Solve linear part
        lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN);
        // Backward transform
        CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
        // Normalize the transform
        normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN);
        // Solve nonlinear part
        nonlin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, DT, XN);
        // Forward transform
        CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));
        // Solve linear part
        lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN);
    }
    float time_value;
    cudaEventRecord(end_event, 0);
    cudaEventSynchronize(end_event);
    cudaEventElapsedTime(&time_value, begin_event, end_event);

    // Print time to file
    FILE *fp = fopen(TIME_F, "a");
    fprintf(fp, "%f, ", time_value);
    fclose(fp);

    // Backward transform to retrieve data
    CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
    // Normalize the transform
    normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN);

    // Copy results to host
    cudaMemcpy(h_psi, d_psi, sizeof(cufftComplex)*XN, cudaMemcpyDeviceToHost);

    // Plot results
    cm_plot_1df(h_psi_0, h_psi, LX, XN, PLOT_F);

    // Wrap up
    cufftDestroy(plan);
    free(h_x);
    free(h_k2);
    free(h_kx);
    free(h_psi_0);
    free(h_psi);
    cudaFree(d_psi);
    cudaFree(d_k2);

    return 0;
}

__global__ void nonlin(cufftComplex *psi, float dt, int xn)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Avoid first and last point (boundary conditions) (needs fixing)
    //if (i >= xn - 1 || i == 0) return;
    if (i >= xn) return;

    float psi2 = cuCabsf(psi[i])*cuCabsf(psi[i]);
    psi[i] = cuCmulf(psi[i], make_cuComplex(cos(psi2*dt), sin(psi2*dt)));
}

__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Avoid first and last point (boundary conditions) (needs fixing)
    //if (i >= xn - 1 || i == 0) return;
    if (i >= xn) return;

    psi[i] = cuCmulf(psi[i], make_cuComplex(cos(k2[i]*dt), -sin(k2[i]*dt)));
}

__global__ void normalize(cufftComplex *psi, int size)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Stay within range since grid might be larger
    if (i >= size) return;

    psi[i].x = psi[i].x/size;
    psi[i].y = psi[i].y/size;
}
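The two files in this pair differ only in API spelling: hipify maps cudaMalloc/cudaMemcpy to hipMalloc/hipMemcpy, the cufft types and calls to their hipfft counterparts (with CUFFT_INVERSE becoming HIPFFT_BACKWARD), and the triple-chevron launch to hipLaunchKernelGGL. A minimal sketch of that launch mapping, using a made-up toy kernel `scale` rather than the solver's kernels:

#include <cuda_runtime.h>      // the hipified twin includes "hip/hip_runtime.h" instead

// Toy kernel, invented for illustration only.
__global__ void scale(float *v, float s, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) v[i] *= s;
}

void launch_scale(float *d_v, float s, int n)
{
    dim3 threadsPerBlock(128, 1, 1);
    dim3 blocksPerGrid((n + 127) / 128, 1, 1);

    // CUDA spelling, as in the .cu file above:
    scale<<<blocksPerGrid, threadsPerBlock>>>(d_v, s, n);

    // hipify rewrites the same launch so that shared-memory bytes and the stream
    // become explicit arguments:
    //   hipLaunchKernelGGL(scale, blocksPerGrid, threadsPerBlock, 0, 0, d_v, s, n);

    cudaDeviceSynchronize();    // hipDeviceSynchronize() on the HIP side
}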
cc7161cd588c5457b6257a7d7c72f362b8d1d5b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <thrust/device_vector.h> #include "nvgraph_error.hxx" #include "nvgraph_vector_kernels.hxx" #include "pagerank_kernels.hxx" namespace nvgraph { template <typename ValueType_> __global__ void update_dn_kernel(int num_vertices, ValueType_* aa, ValueType_ beta) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_vertices; r += blockDim.x * gridDim.x) { // NOTE 1 : a = alpha*a + (1-alpha)e if (aa[r] == 0.0) aa[r] = beta; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) } } template <typename ValueType_> void update_dangling_nodes(int num_vertices, ValueType_* dangling_nodes, ValueType_ damping_factor, hipStream_t stream) { int num_threads = 256; int max_grid_size = 4096; int num_blocks = ::min(max_grid_size, (num_vertices/num_threads)+1); ValueType_ beta = 1.0-damping_factor; hipLaunchKernelGGL(( update_dn_kernel), dim3(num_blocks), dim3(num_threads), 0, stream, num_vertices, dangling_nodes,beta); cudaCheckError(); } //Explicit template void update_dangling_nodes<double> (int num_vertices, double* dangling_nodes, double damping_factor, hipStream_t stream); template void update_dangling_nodes<float> (int num_vertices, float* dangling_nodes, float damping_factor, hipStream_t stream); } // end namespace nvgraph
cc7161cd588c5457b6257a7d7c72f362b8d1d5b3.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <thrust/device_vector.h> #include "nvgraph_error.hxx" #include "nvgraph_vector_kernels.hxx" #include "pagerank_kernels.hxx" namespace nvgraph { template <typename ValueType_> __global__ void update_dn_kernel(int num_vertices, ValueType_* aa, ValueType_ beta) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_vertices; r += blockDim.x * gridDim.x) { // NOTE 1 : a = alpha*a + (1-alpha)e if (aa[r] == 0.0) aa[r] = beta; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) } } template <typename ValueType_> void update_dangling_nodes(int num_vertices, ValueType_* dangling_nodes, ValueType_ damping_factor, cudaStream_t stream) { int num_threads = 256; int max_grid_size = 4096; int num_blocks = std::min(max_grid_size, (num_vertices/num_threads)+1); ValueType_ beta = 1.0-damping_factor; update_dn_kernel<<<num_blocks, num_threads, 0, stream>>>(num_vertices, dangling_nodes,beta); cudaCheckError(); } //Explicit template void update_dangling_nodes<double> (int num_vertices, double* dangling_nodes, double damping_factor, cudaStream_t stream); template void update_dangling_nodes<float> (int num_vertices, float* dangling_nodes, float damping_factor, cudaStream_t stream); } // end namespace nvgraph
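The kernel in this pair covers an arbitrary number of vertices with a grid capped at max_grid_size = 4096 blocks by using a grid-stride loop. A minimal standalone sketch of the same pattern; the names `fill_if_zero` and `run_fill` are invented here and stand in for update_dn_kernel and its wrapper:

#include <cuda_runtime.h>
#include <algorithm>

// Same shape as update_dn_kernel: write `beta` wherever the entry is exactly zero.
__global__ void fill_if_zero(int n, float *a, float beta)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Grid-stride loop: thread `tid` handles tid, tid + stride, tid + 2*stride, ...
    for (int i = tid; i < n; i += blockDim.x * gridDim.x)
        if (a[i] == 0.0f) a[i] = beta;
}

void run_fill(int n, float *d_a, float beta, cudaStream_t stream)
{
    int num_threads = 256;
    int num_blocks  = std::min(4096, n / num_threads + 1);   // cap the grid like the wrapper above
    fill_if_zero<<<num_blocks, num_threads, 0, stream>>>(n, d_a, beta);
}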
b8110552184d39b0910e5363f19aff6d7f08e6a9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kHingeQuadraticRowMajor.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float *labels = NULL; hipMalloc(&labels, XSIZE*YSIZE); float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; float margin = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kHingeQuadraticRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,margin); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kHingeQuadraticRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,margin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kHingeQuadraticRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,margin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b8110552184d39b0910e5363f19aff6d7f08e6a9.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kHingeQuadraticRowMajor.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float *labels = NULL; cudaMalloc(&labels, XSIZE*YSIZE); float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; float margin = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kHingeQuadraticRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,margin); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kHingeQuadraticRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,margin); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kHingeQuadraticRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,margin); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
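One caveat about the harness in this pair: kernel launches are asynchronous, and the timed loop reads steady_clock::now() without synchronizing after the 1000 launches, so the host-side interval can reflect launch/queueing overhead as much as completed kernel time. A sketch of event-based timing instead (the same technique the split-step solver earlier in this dump uses); `dummy_kernel` and `time_kernel_ms` are invented names:

#include <cuda_runtime.h>

// Stand-in for the kernel under test; any __global__ function works here.
__global__ void dummy_kernel(float *buf, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) buf[i] = buf[i] * 2.0f + 1.0f;
}

// Average time per launch in milliseconds, measured with CUDA events so all
// timed work has actually completed before the elapsed time is read.
float time_kernel_ms(float *d_buf, int n, int iters)
{
    dim3 block(256, 1, 1), grid((n + 255) / 256, 1, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    for (int i = 0; i < iters; i++)
        dummy_kernel<<<grid, block>>>(d_buf, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);          // wait for every queued launch to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iters;
}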
723469dbf27e08c348c9747506e845d83f205dac.hip
// !!! This is a file automatically generated by hipify!!! /* The program takes an array as input, multiply the elements with 2 and stores the output in another array. For the array size less than 4000, CPU runs faster than GPU and then GPU takes over CPU's performance. */ #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #include <sys/time.h> __global__ void vecMult_d (int *A, int *B, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x ; if (i < N) { B [i] = A [i] * 2; } } void vecMult_h(int *A, int *B, int N) { for (int i =0 ; i < N; i++) { B[i] = A [i] * 2; } } int main() { int *a_h, *b_h; int *a_d, *b_d; int blocksize = 512, n; struct timeval t1_start, t1_end, t2_start, t2_end; double time_d, time_h; remove ("gpuresult"); remove ("cpuresult"); FILE *fp1, *fp2; fp1 = fopen ("gpuresult", "a+"); fp2 = fopen ("cpuresult", "a+"); for (n = 0; n < 8000; n += 1) { a_h = (int *)malloc(sizeof(int)*n); b_h = (int *)malloc(sizeof(int)*n); hipMalloc ((void **) &a_d, n * sizeof (int)); hipMalloc ((void **) &b_d, n * sizeof (int)); dim3 dimBlock (blocksize); dim3 dimGrid (ceil (float (n) / float (dimBlock.x))); for (int j = 0; j < n; j++) a_h [j] = j; hipMemcpy (a_d, a_h, n * sizeof (int), hipMemcpyHostToDevice); gettimeofday (&t1_start, 0); hipLaunchKernelGGL(( vecMult_d) , dim3(dimGrid), dim3(dimBlock), 0, 0, a_d, b_d, n); hipDeviceSynchronize (); gettimeofday (&t1_end, 0); hipMemcpy (b_h, b_d, n * sizeof (int), hipMemcpyDeviceToHost); gettimeofday (&t2_start, 0); vecMult_h (a_h, b_h, n); gettimeofday (&t2_end, 0); time_d = (t1_end.tv_sec-t1_start.tv_sec) * 1000000 + t1_end.tv_usec - t1_start.tv_usec; time_h = (t2_end.tv_sec-t2_start.tv_sec) * 1000000 + t2_end.tv_usec - t2_start.tv_usec; fprintf (fp1, "%d\t%lf\t\n", n, time_d); fprintf (fp2, "%d\t%lf\t\n", n, time_h); free (a_h); free (b_h); hipFree (a_d); hipFree (b_d); } return (0); }
723469dbf27e08c348c9747506e845d83f205dac.cu
/* The program takes an array as input, multiply the elements with 2 and stores the output in another array. For the array size less than 4000, CPU runs faster than GPU and then GPU takes over CPU's performance. */ #include <iostream> #include <stdio.h> #include <cuda.h> #include <sys/time.h> __global__ void vecMult_d (int *A, int *B, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x ; if (i < N) { B [i] = A [i] * 2; } } void vecMult_h(int *A, int *B, int N) { for (int i =0 ; i < N; i++) { B[i] = A [i] * 2; } } int main() { int *a_h, *b_h; int *a_d, *b_d; int blocksize = 512, n; struct timeval t1_start, t1_end, t2_start, t2_end; double time_d, time_h; remove ("gpuresult"); remove ("cpuresult"); FILE *fp1, *fp2; fp1 = fopen ("gpuresult", "a+"); fp2 = fopen ("cpuresult", "a+"); for (n = 0; n < 8000; n += 1) { a_h = (int *)malloc(sizeof(int)*n); b_h = (int *)malloc(sizeof(int)*n); cudaMalloc ((void **) &a_d, n * sizeof (int)); cudaMalloc ((void **) &b_d, n * sizeof (int)); dim3 dimBlock (blocksize); dim3 dimGrid (ceil (float (n) / float (dimBlock.x))); for (int j = 0; j < n; j++) a_h [j] = j; cudaMemcpy (a_d, a_h, n * sizeof (int), cudaMemcpyHostToDevice); gettimeofday (&t1_start, 0); vecMult_d <<<dimGrid, dimBlock>>> (a_d, b_d, n); cudaThreadSynchronize (); gettimeofday (&t1_end, 0); cudaMemcpy (b_h, b_d, n * sizeof (int), cudaMemcpyDeviceToHost); gettimeofday (&t2_start, 0); vecMult_h (a_h, b_h, n); gettimeofday (&t2_end, 0); time_d = (t1_end.tv_sec-t1_start.tv_sec) * 1000000 + t1_end.tv_usec - t1_start.tv_usec; time_h = (t2_end.tv_sec-t2_start.tv_sec) * 1000000 + t2_end.tv_usec - t2_start.tv_usec; fprintf (fp1, "%d\t%lf\t\n", n, time_d); fprintf (fp2, "%d\t%lf\t\n", n, time_h); free (a_h); free (b_h); cudaFree (a_d); cudaFree (b_d); } return (0); }
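Two details worth flagging in this pair: the gettimeofday bracket around the GPU run covers only the kernel (both cudaMemcpy calls sit outside it), so the CPU/GPU crossover near n = 4000 mentioned in the header refers to kernel time alone; and cudaThreadSynchronize in the .cu file is the deprecated spelling that the hipified file maps to hipDeviceSynchronize. A sketch that times the transfer-inclusive path instead; `gpu_end_to_end_us` is a hypothetical helper wrapped around the pair's own kernel:

#include <cuda_runtime.h>
#include <sys/time.h>

__global__ void vecMult_d(int *A, int *B, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) B[i] = A[i] * 2;
}

// Microseconds for copy-in + kernel + copy-out, i.e. what a caller actually waits for.
double gpu_end_to_end_us(const int *a_h, int *b_h, int *a_d, int *b_d, int n)
{
    struct timeval t0, t1;
    dim3 dimBlock(512);
    dim3 dimGrid((n + 511) / 512);

    gettimeofday(&t0, 0);
    cudaMemcpy(a_d, a_h, n * sizeof(int), cudaMemcpyHostToDevice);
    vecMult_d<<<dimGrid, dimBlock>>>(a_d, b_d, n);
    cudaMemcpy(b_h, b_d, n * sizeof(int), cudaMemcpyDeviceToHost);   // blocks until the kernel is done
    gettimeofday(&t1, 0);

    return (t1.tv_sec - t0.tv_sec) * 1000000.0 + (t1.tv_usec - t0.tv_usec);
}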
6b81ad52562f84ed395b49d4302584e2d0332540.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void zgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = dcolind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = dval[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*dx[col]; } } dy[ Idx ] = dot * alpha + beta * dy [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row (=1) @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in SELLC/P @param[in] dcolind magmaIndex_ptr columnindices of A in SELLC/P @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); magma_int_t threads = blocksize; hipLaunchKernelGGL(( zgesellcmv_kernel), dim3(grid), dim3(threads), 0, queue , m, n, blocksize, alpha, dval, dcolind, drowptr, dx, beta, dy ); return MAGMA_SUCCESS; }
6b81ad52562f84ed395b49d4302584e2d0332540.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z // SELLC SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS __global__ void zgesellcmv_kernel( int num_rows, int num_cols, int blocksize, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = dcolind [offset+ blocksize * n + threadIdx.x ]; magmaDoubleComplex val = dval[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*dx[col]; } } dy[ Idx ] = dot * alpha + beta * dy [ Idx ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLC/SELLP. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row (=1) @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in SELLC/P @param[in] dcolind magmaIndex_ptr columnindices of A in SELLC/P @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgesellcmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { // the kernel can only handle up to 65535 slices // (~2M rows for blocksize 32) dim3 grid( slices, 1, 1); magma_int_t threads = blocksize; zgesellcmv_kernel<<< grid, threads, 0, queue >>> ( m, n, blocksize, alpha, dval, dcolind, drowptr, dx, beta, dy ); return MAGMA_SUCCESS; }
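The interesting part of zgesellcmv_kernel is the SELL-C indexing: each thread block owns one slice of `blocksize` rows (the host wrapper launches with the block size equal to `blocksize`), drowptr gives the slice's offset into the packed value/column arrays, and values are stored column-major inside the slice so consecutive threads read consecutive addresses. A simplified, single-precision sketch of that access pattern with the alpha/beta scaling dropped; `sellc_spmv_f32` is an invented name, not a MAGMA routine:

#include <cuda_runtime.h>

// One block per SELL-C slice; launch with blockDim.x == blocksize and
// gridDim.x == number of slices, as the MAGMA wrapper above does.
__global__ void sellc_spmv_f32(int num_rows, int blocksize,
                               const float *val, const int *colind,
                               const int *rowptr, const float *x, float *y)
{
    int row    = blockDim.x * blockIdx.x + threadIdx.x;              // global row for this thread
    int offset = rowptr[blockIdx.x];                                 // start of this slice in val/colind
    int width  = (rowptr[blockIdx.x + 1] - offset) / blocksize;      // padded row length of the slice

    if (row < num_rows) {
        float dot = 0.0f;
        for (int n = 0; n < width; n++) {
            // Column-major inside the slice: a row's n-th entry sits `blocksize`
            // elements after its (n-1)-th entry, so the warp's loads coalesce.
            float v = val[offset + blocksize * n + threadIdx.x];
            int   c = colind[offset + blocksize * n + threadIdx.x];
            if (v != 0.0f)
                dot += v * x[c];
        }
        y[row] = dot;
    }
}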
335468e7091a39c19f595478f882998b265eed8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include "nbody_common.h" #include "nbody_gpu.h" #include "test_utils.h" #include <chrono> #include <fmt/core.h> namespace{ const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations } // namespace void NBody_GPU_V1(int nBodies,bool verbose){ int bytes = nBodies * sizeof(Body); int thread_size = 256; Body *baseline = (Body*)malloc(bytes); test_utils::GetBaseline(baseline,nBodies); Body *buf = (Body*)malloc(bytes); nbody_common::initBodies(buf, nBodies); // Init pos & vel data hipError_t errSync, errAsync; Body *p; hipMallocManaged((void**)&p, bytes); errSync = hipGetLastError(); if (errSync!=hipSuccess){fmt::print("Malloc error: {}\n",hipGetErrorString(errSync));} memcpy(p, buf, bytes); auto st0 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < nIters; iter++) { dim3 num_of_blocks((nBodies-1)/thread_size + 1,1,1); dim3 threads_per_block(thread_size,1,1); hipLaunchKernelGGL(( bodyForce_v1), dim3(num_of_blocks),dim3(threads_per_block), 0, 0, p, dt, nBodies); // compute interbody forces errSync = hipGetLastError(); if (errSync!=hipSuccess){fmt::print("Sync error: {}\n",hipGetErrorString(errSync));} errAsync = hipDeviceSynchronize(); if (errAsync!=hipSuccess){fmt::print("Async error: {}\n",hipGetErrorString(errAsync));} for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].pos.x += p[i].vel.x*dt; p[i].pos.y += p[i].vel.y*dt; p[i].pos.z += p[i].vel.z*dt; } } auto st1 = std::chrono::high_resolution_clock::now(); auto elapsed_ms = 1e-6 * (st1-st0).count()/ nIters ; fmt::print("{:d} Bodies: average execution time = {:.6f} milliseconds\n",nBodies,elapsed_ms); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / (elapsed_ms/1000.0); fmt::print("{:d} Bodies: average {:0.6f} Billion Interactions / second\n",nBodies,billionsOfOpsPerSecond); if (verbose){ fmt::print("Output:\n"); test_utils::PrintNBodies(p,nBodies); fmt::print("\n"); fmt::print("Baseline:\n"); test_utils::PrintNBodies(baseline,nBodies); } EXPECT_TRUE(test_utils::AlmostEqual(p,baseline,nBodies,1e-4)); hipFree(p); free(buf); } TEST(Test01_RefactorBodyForce,TwoBodiesTest){ const int nBodies = 2; NBody_GPU_V1(nBodies, true); } TEST(Test01_RefactorBodyForce,EightBodiesTest){ const int nBodies = 8; NBody_GPU_V1(nBodies, true); } TEST(Test01_RefactorBodyForce,_32BodiesTest){ const int nBodies = 32; NBody_GPU_V1(nBodies, false); } TEST(Test01_RefactorBodyForce,_4096BodiesTest){ const int nBodies = 4096; NBody_GPU_V1(nBodies, false); }
335468e7091a39c19f595478f882998b265eed8e.cu
#include "gtest/gtest.h" #include "nbody_common.h" #include "nbody_gpu.h" #include "test_utils.h" #include <chrono> #include <fmt/core.h> namespace{ const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations } // namespace void NBody_GPU_V1(int nBodies,bool verbose){ int bytes = nBodies * sizeof(Body); int thread_size = 256; Body *baseline = (Body*)malloc(bytes); test_utils::GetBaseline(baseline,nBodies); Body *buf = (Body*)malloc(bytes); nbody_common::initBodies(buf, nBodies); // Init pos & vel data cudaError_t errSync, errAsync; Body *p; cudaMallocManaged((void**)&p, bytes); errSync = cudaGetLastError(); if (errSync!=cudaSuccess){fmt::print("Malloc error: {}\n",cudaGetErrorString(errSync));} memcpy(p, buf, bytes); auto st0 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < nIters; iter++) { dim3 num_of_blocks((nBodies-1)/thread_size + 1,1,1); dim3 threads_per_block(thread_size,1,1); bodyForce_v1<<<num_of_blocks,threads_per_block>>>(p, dt, nBodies); // compute interbody forces errSync = cudaGetLastError(); if (errSync!=cudaSuccess){fmt::print("Sync error: {}\n",cudaGetErrorString(errSync));} errAsync = cudaDeviceSynchronize(); if (errAsync!=cudaSuccess){fmt::print("Async error: {}\n",cudaGetErrorString(errAsync));} for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].pos.x += p[i].vel.x*dt; p[i].pos.y += p[i].vel.y*dt; p[i].pos.z += p[i].vel.z*dt; } } auto st1 = std::chrono::high_resolution_clock::now(); auto elapsed_ms = 1e-6 * (st1-st0).count()/ nIters ; fmt::print("{:d} Bodies: average execution time = {:.6f} milliseconds\n",nBodies,elapsed_ms); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / (elapsed_ms/1000.0); fmt::print("{:d} Bodies: average {:0.6f} Billion Interactions / second\n",nBodies,billionsOfOpsPerSecond); if (verbose){ fmt::print("Output:\n"); test_utils::PrintNBodies(p,nBodies); fmt::print("\n"); fmt::print("Baseline:\n"); test_utils::PrintNBodies(baseline,nBodies); } EXPECT_TRUE(test_utils::AlmostEqual(p,baseline,nBodies,1e-4)); cudaFree(p); free(buf); } TEST(Test01_RefactorBodyForce,TwoBodiesTest){ const int nBodies = 2; NBody_GPU_V1(nBodies, true); } TEST(Test01_RefactorBodyForce,EightBodiesTest){ const int nBodies = 8; NBody_GPU_V1(nBodies, true); } TEST(Test01_RefactorBodyForce,_32BodiesTest){ const int nBodies = 32; NBody_GPU_V1(nBodies, false); } TEST(Test01_RefactorBodyForce,_4096BodiesTest){ const int nBodies = 4096; NBody_GPU_V1(nBodies, false); }
ecfddef900da76900aa93f4780857ef91766d243.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Equihash CUDA solver // Copyright (c) 2016 John Tromp #define XINTREE #define UNROLL #define htole32(x) (x) #define HAVE_DECL_HTOLE32 1 #include "../cpu_tromp/equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <functional> #include <vector> #include <iostream> #include "eqcuda.hpp" #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) #ifndef SAVEMEM #if RESTBITS == 4 // can't save memory in such small buckets #define SAVEMEM 1 #elif RESTBITS >= 8 // take advantage of law of large numbers (sum of 2^8 random numbers) // this reduces (200,9) memory to under 144MB, with negligible discarding #define SAVEMEM 9/14 #endif #endif // number of buckets static const u32 NBUCKETS = 1 << BUCKBITS; // bucket mask static const u32 BUCKMASK = NBUCKETS - 1; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS + 1 + 1; static const u32 SLOTRANGE = 1 << SLOTBITS; // number of slots per bucket static const u32 NSLOTS = SLOTRANGE * SAVEMEM; // SLOTBITS mask static const u32 SLOTMASK = SLOTRANGE - 1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1 << RESTBITS; // RESTBITS mask static const u32 RESTMASK = NRESTS - 1; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { u32 bid_s0_s1_x; // manual bitfields __device__ tree(const u32 idx, const u32 xh) { bid_s0_s1_x = idx << RESTBITS | xh; } __device__ tree(const u32 idx) { bid_s0_s1_x = idx; } __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) { #ifdef XINTREE bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh; #else bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1; #endif } __device__ u32 getindex() const { #ifdef XINTREE return bid_s0_s1_x >> RESTBITS; #else return bid_s0_s1_x; #endif } __device__ u32 bucketid() const { #ifdef XINTREE return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS); #else return bid_s0_s1_x >> (2 * SLOTBITS); #endif } __device__ u32 slotid0() const { #ifdef XINTREE return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK; #else return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK; #endif } __device__ u32 slotid1() const { #ifdef XINTREE return (bid_s0_s1_x >> RESTBITS) & SLOTMASK; #else return bid_s0_s1_x & SLOTMASK; #endif } __device__ u32 xhash() const { return bid_s0_s1_x & RESTMASK; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; 
#else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } // size (in bytes) of hash in round 0 <= r < WK static __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } static u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } static __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK + 1) / 2]; bucket1 *trees1[WK / 2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) { setheader(&blake_ctx, header, len, nonce, nlen); checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } __device__ u32 getnslots0(const u32 bid) { u32 &nslot = nslots[0][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ u32 getnslots1(const u32 bid) { u32 &nslot = nslots[1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i = 0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size + i]; indices[size + i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid()]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0()].attr.getindex(); indices[size] = buck[t.slotid1()].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid()]; const u32 size = 1 << 1; listindices1(buck[t.slotid0()].attr, indices); listindices1(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid()]; const u32 size = 1 << 2; listindices2(buck[t.slotid0()].attr, indices); listindices2(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid()]; const u32 size = 1 << 3; listindices3(buck[t.slotid0()].attr, indices); listindices3(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid()]; const u32 size = 1 << 4; listindices4(buck[t.slotid0()].attr, indices); listindices4(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid()]; const u32 size = 1 << 5; listindices5(buck[t.slotid0()].attr, indices); listindices5(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid()]; const u32 size = 1 << 6; listindices6(buck[t.slotid0()].attr, indices); listindices6(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid()]; const u32 size = 1 << 7; listindices7(buck[t.slotid0()].attr, indices); listindices7(buck[t.slotid1()].attr, 
indices+size); orderindices(indices, size); } __device__ void listindices9(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[4][t.bucketid()]; const u32 size = 1 << 8; listindices8(buck[t.slotid0()].attr, indices); listindices8(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6); binsizes[bsize]++; } for (u32 i = 0; i < 65; i++) { #ifdef HIST printf(" %d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; #else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] & 0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar 
xslot; #else typedef u16 xslot; #endif static const xslot xnil = ~0; xslot xhashslots[NRESTS]; xslot nextxhashslot[NSLOTS]; xslot nextslot; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(xhashslots, xnil, NRESTS * sizeof(xslot)); memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else nextslot = xhashslots[xh]; nextxhashslot[s1] = nextslot; xhashslots[xh] = s1; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return nextslot != xnil; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; #else nextslot = nextxhashslot[s0 = nextslot]; #endif return s0; } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); // always 23 ? const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * WN / 8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; slot0 &s = eq->hta.trees0[0][bucketid][slot]; #ifdef XINTREE s.attr = tree(block*HASHESPERBLAKE+i, xhash); #else s.attr = tree(block*HASHESPERBLAKE+i); #endif memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4 | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; xhash &= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 
xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2 | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]); xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i = htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 
4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x1234); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if 
(htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4123); const u32 xorbucketid = bexor >> 8; const u32 xhash = bexor >> 4 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 
bexor = __byte_perm(xor0, 0, 0x4012); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x3456); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); // assume WK odd for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { #ifdef XINTREE eq->candidate(tree(bucketid, s0, s1, 0)); #else eq->candidate(tree(bucketid, s0, s1)); #endif } } } } } eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id) { eq = new equi(threadsperblock * totalblocks); sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096); solutions = (proof*)(((long long)sol_memory + 4095) & -4096); checkCudaErrors(hipSetDevice(device_id)); checkCudaErrors(hipDeviceReset()); checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync)); checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0))); checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1))); for (u32 r = 0; r < WK; r++) if ((r & 1) == 0) eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2); else eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2); checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof))); checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi))); } eq_cuda_context::~eq_cuda_context() { /*checkCudaErrors(hipFree(eq->nslots)); checkCudaErrors(hipFree(eq->sols)); checkCudaErrors(hipFree(eq->hta.trees0[0])); checkCudaErrors(hipFree(eq->hta.trees1[0]));*/ checkCudaErrors(hipSetDevice(device_id)); checkCudaErrors(hipDeviceReset()); free(sol_memory); delete eq; } void eq_cuda_context::solve(const char *tequihash_header, unsigned int 
tequihash_header_len, const char* nonce, unsigned int nonce_len, std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, uint32_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef) { checkCudaErrors(hipSetDevice(device_id)); eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len); checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice)); digitH << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) digit_1 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit2 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit3 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit4 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit5 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit6 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit7 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit8 << <totalblocks, threadsperblock >> >(device_eq); #else for (u32 r = 1; r < WK; r++) { r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r) : digitE << <totalblocks, threadsperblock >> >(device_eq, r); } #endif if (cancelf()) return; digitK << <totalblocks, threadsperblock >> >(device_eq); checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost)); for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++) { std::vector<uint32_t> index_vector(PROOFSIZE); for (u32 i = 0; i < PROOFSIZE; i++) { index_vector[i] = solutions[s][i]; } solutionf(index_vector, DIGITBITS, 0, nullptr); if (cancelf()) return; } hashdonef(); }
ecfddef900da76900aa93f4780857ef91766d243.cu
// Equihash CUDA solver // Copyright (c) 2016 John Tromp #define XINTREE #define UNROLL #define htole32(x) (x) #define HAVE_DECL_HTOLE32 1 #include "../cpu_tromp/equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <functional> #include <vector> #include <iostream> #include "eqcuda.hpp" #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) #ifndef SAVEMEM #if RESTBITS == 4 // can't save memory in such small buckets #define SAVEMEM 1 #elif RESTBITS >= 8 // take advantage of law of large numbers (sum of 2^8 random numbers) // this reduces (200,9) memory to under 144MB, with negligible discarding #define SAVEMEM 9/14 #endif #endif // number of buckets static const u32 NBUCKETS = 1 << BUCKBITS; // bucket mask static const u32 BUCKMASK = NBUCKETS - 1; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS + 1 + 1; static const u32 SLOTRANGE = 1 << SLOTBITS; // number of slots per bucket static const u32 NSLOTS = SLOTRANGE * SAVEMEM; // SLOTBITS mask static const u32 SLOTMASK = SLOTRANGE - 1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1 << RESTBITS; // RESTBITS mask static const u32 RESTMASK = NRESTS - 1; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { u32 bid_s0_s1_x; // manual bitfields __device__ tree(const u32 idx, const u32 xh) { bid_s0_s1_x = idx << RESTBITS | xh; } __device__ tree(const u32 idx) { bid_s0_s1_x = idx; } __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) { #ifdef XINTREE bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh; #else bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1; #endif } __device__ u32 getindex() const { #ifdef XINTREE return bid_s0_s1_x >> RESTBITS; #else return bid_s0_s1_x; #endif } __device__ u32 bucketid() const { #ifdef XINTREE return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS); #else return bid_s0_s1_x >> (2 * SLOTBITS); #endif } __device__ u32 slotid0() const { #ifdef XINTREE return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK; #else return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK; #endif } __device__ u32 slotid1() const { #ifdef XINTREE return (bid_s0_s1_x >> RESTBITS) & SLOTMASK; #else return bid_s0_s1_x & SLOTMASK; #endif } __device__ u32 xhash() const { return bid_s0_s1_x & RESTMASK; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 
7) / 8; } // size (in bytes) of hash in round 0 <= r < WK static __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } static u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } static __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK + 1) / 2]; bucket1 *trees1[WK / 2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) { setheader(&blake_ctx, header, len, nonce, nlen); checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } __device__ u32 getnslots0(const u32 bid) { u32 &nslot = nslots[0][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ u32 getnslots1(const u32 bid) { u32 &nslot = nslots[1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i = 0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size + i]; indices[size + i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid()]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0()].attr.getindex(); indices[size] = buck[t.slotid1()].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid()]; const u32 size = 1 << 1; listindices1(buck[t.slotid0()].attr, indices); listindices1(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid()]; const u32 size = 1 << 2; listindices2(buck[t.slotid0()].attr, indices); listindices2(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid()]; const u32 size = 1 << 3; listindices3(buck[t.slotid0()].attr, indices); listindices3(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid()]; const u32 size = 1 << 4; listindices4(buck[t.slotid0()].attr, indices); listindices4(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid()]; const u32 size = 1 << 5; listindices5(buck[t.slotid0()].attr, indices); listindices5(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid()]; const u32 size = 1 << 6; listindices6(buck[t.slotid0()].attr, indices); listindices6(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid()]; const u32 size = 1 << 7; listindices7(buck[t.slotid0()].attr, indices); listindices7(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices9(const tree t, u32 
*indices) { const bucket0 &buck = hta.trees0[4][t.bucketid()]; const u32 size = 1 << 8; listindices8(buck[t.slotid0()].attr, indices); listindices8(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6); binsizes[bsize]++; } for (u32 i = 0; i < 65; i++) { #ifdef HIST printf(" %d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; #else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] & 0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar xslot; #else typedef u16 xslot; #endif static const xslot xnil = ~0; xslot xhashslots[NRESTS]; 
xslot nextxhashslot[NSLOTS]; xslot nextslot; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(xhashslots, xnil, NRESTS * sizeof(xslot)); memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else nextslot = xhashslots[xh]; nextxhashslot[s1] = nextslot; xhashslots[xh] = s1; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return nextslot != xnil; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; #else nextslot = nextxhashslot[s0 = nextslot]; #endif return s0; } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); // always 23 ? const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * WN / 8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; slot0 &s = eq->hta.trees0[0][bucketid][slot]; #ifdef XINTREE s.attr = tree(block*HASHESPERBLAKE+i, xhash); #else s.attr = tree(block*HASHESPERBLAKE+i); #endif memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4 | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; xhash &= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (xhash = 
bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2 | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]); xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i = htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = 
atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x1234); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = 
pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4123); const u32 xorbucketid = bexor >> 8; const u32 xhash = bexor >> 4 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4012); const u32 xorbucketid = bexor >> 
4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x3456); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); // assume WK odd for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { #ifdef XINTREE eq->candidate(tree(bucketid, s0, s1, 0)); #else eq->candidate(tree(bucketid, s0, s1)); #endif } } } } } eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id) { eq = new equi(threadsperblock * totalblocks); sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096); solutions = (proof*)(((long long)sol_memory + 4095) & -4096); checkCudaErrors(cudaSetDevice(device_id)); checkCudaErrors(cudaDeviceReset()); checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0))); checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1))); for (u32 r = 0; r < WK; r++) if ((r & 1) == 0) eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2); else eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2); checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof))); checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi))); } eq_cuda_context::~eq_cuda_context() { /*checkCudaErrors(cudaFree(eq->nslots)); checkCudaErrors(cudaFree(eq->sols)); checkCudaErrors(cudaFree(eq->hta.trees0[0])); checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/ checkCudaErrors(cudaSetDevice(device_id)); checkCudaErrors(cudaDeviceReset()); free(sol_memory); delete eq; } void eq_cuda_context::solve(const char *tequihash_header, unsigned int tequihash_header_len, const char* nonce, unsigned int nonce_len, 
std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, uint32_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef) { checkCudaErrors(cudaSetDevice(device_id)); eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len); checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice)); digitH << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) digit_1 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit2 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit3 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit4 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit5 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit6 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit7 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit8 << <totalblocks, threadsperblock >> >(device_eq); #else for (u32 r = 1; r < WK; r++) { r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r) : digitE << <totalblocks, threadsperblock >> >(device_eq, r); } #endif if (cancelf()) return; digitK << <totalblocks, threadsperblock >> >(device_eq); checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost)); for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++) { std::vector<uint32_t> index_vector(PROOFSIZE); for (u32 i = 0; i < PROOFSIZE; i++) { index_vector[i] = solutions[s][i]; } solutionf(index_vector, DIGITBITS, 0, nullptr); if (cancelf()) return; } hashdonef(); }
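// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor addition, not part of the original file):
// a minimal host-side driver for the eq_cuda_context class defined above.
// Only the constructor and solve() signatures come from the code above; the
// thread/block counts, the function name and the callback bodies are
// placeholder assumptions.
// ---------------------------------------------------------------------------
void example_solve_one_nonce(const char *header, unsigned int header_len,
                             const char *nonce, unsigned int nonce_len)
{
    // 64 threads per block, 4096 blocks and device 0 are illustrative values only.
    eq_cuda_context ctx(/*tpb=*/64, /*blocks=*/4096, /*device_id=*/0);

    ctx.solve(header, header_len, nonce, nonce_len,
        /*cancelf*/   []() { return false; },   // never request cancellation
        /*solutionf*/ [](const std::vector<uint32_t> &indices, size_t cbitlen,
                         uint32_t, const unsigned char *) {
            // solve() passes DIGITBITS as cbitlen and nullptr for the compressed form
            printf("solution with %zu indices (cbitlen %zu)\n",
                   indices.size(), cbitlen);
        },
        /*hashdonef*/ []() { printf("done hashing this nonce\n"); });
}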
280b4208a71a010ad40e614a6e5ed67d9e56444b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop cuConjf. chemv_kernel_U (upper) in chemv_upper.cu is very similar to chemv_kernel_L (lower) in chemv.cu; diff the two files to compare. @generated from zhemv_mgpu_upper.cu normal z -> c, Fri Jan 30 19:00:10 2015 @author Mark Gates */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void chemv_kernel_U_mgpu( int n, magmaFloatComplex const * __restrict__ A, int lda, magmaFloatComplex const * __restrict__ x, int incx, magmaFloatComplex * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); magmaFloatComplex psum, psum_t; magmaFloatComplex total = MAGMA_C_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaFloatComplex rA[4]; magmaFloatComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial && tx >= partial) || (blk == 0 && tx < block_offset) ) { sx_blk[tx] = MAGMA_C_ZERO; } else { sx_blk[tx] = x[0]; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) if ( blk % ngpu == my_gpu_id ) { // this GPU owns this diagonal block, so // move to 32x32 diag block A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = cuConjf( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = cuConjf( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) 
+ sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += cuConjf( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2) } // finish switching thread offset A -= ty2*lda + tx2; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1; A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for(int jj=next; jj < gridDim.x; jj += ngpu) { partial = (jj == gridDim.x - 1 ? 
((n + block_offset) % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj // block is right of diagonal, so don't need to worry about offset here if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_C_ZERO; } } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for(int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_C_ZERO; } } } else { #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = cuConjf( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_C_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; //MAGMA_C_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end chemv_kernel_U_mgpu /************************************************************** Upper case, sum up partial results per GPU. Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] Note beta*y is not included here; see magmablas_chemv_mgpu_sync. The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks: [ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock} work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk} [ x x * ] blk=2 blanks are not set [ * ] blk=3 [ x x x x * ] blk=4 [ * ] blk=0 work[gpu=1] = [ x * ] blk=1 [ * ] blk=2 [ x x x * ] blk=3 [ * ] blk=4 On output, rows across are summed up. Entries right of the diagonal blocks are not accessed. There are no blank lines; work has been set to 0 if a GPU has no data to contribute. [ * ] y[gpu=0] = [ * ] [ x + x + * ] [ * ] [ x + x + x + x + * ] [ * ] y[gpu=1] = [ x + * ] [ * ] [ x + x + x + * ] [ * ] ********************************************************************/ __global__ void chemv_kernel_U_mgpu_sum( int n, magmaFloatComplex alpha, int lda, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex const * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [block_offset, ..., n+block_offset) if ( ind >= block_offset && ind < n+block_offset ) { magmaFloatComplex Ax = MAGMA_C_ZERO; work += ind; // if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data; // else only block j=blk contains data. int first = 0; if ( blk % ngpu != my_gpu_id ) { first = blk; } for(int j = first; j <= blk; ++j) { Ax += work[j*lda]; } y[ind * incy] = alpha*Ax; // see magmablas_chemv_sync for beta*y } } // end chemv_kernel_L_mgpu_sum
280b4208a71a010ad40e614a6e5ed67d9e56444b.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop cuConjf. chemv_kernel_U (upper) in chemv_upper.cu is very similar to chemv_kernel_L (lower) in chemv.cu; diff the two files to compare. @generated from zhemv_mgpu_upper.cu normal z -> c, Fri Jan 30 19:00:10 2015 @author Mark Gates */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void chemv_kernel_U_mgpu( int n, magmaFloatComplex const * __restrict__ A, int lda, magmaFloatComplex const * __restrict__ x, int incx, magmaFloatComplex * __restrict__ work, int my_gpu_id, int ngpu, int block_offset) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); magmaFloatComplex psum, psum_t; magmaFloatComplex total = MAGMA_C_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ] __shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag magmaFloatComplex rA[4]; magmaFloatComplex psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( (partial && tx >= partial) || (blk == 0 && tx < block_offset) ) { sx_blk[tx] = MAGMA_C_ZERO; } else { sx_blk[tx] = x[0]; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) if ( blk % ngpu == my_gpu_id ) { // this GPU owns this diagonal block, so // move to 32x32 diag block A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = cuConjf( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = cuConjf( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) 
+ sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += cuConjf( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2) A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2) } // finish switching thread offset A -= ty2*lda + tx2; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1; A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for(int jj=next; jj < gridDim.x; jj += ngpu) { partial = (jj == gridDim.x - 1 ? 
((n + block_offset) % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj // block is right of diagonal, so don't need to worry about offset here if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_C_ZERO; } } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for(int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_C_ZERO; } } } else { #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = cuConjf( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_C_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; //MAGMA_C_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end chemv_kernel_U_mgpu /************************************************************** Upper case, sum up partial results per GPU. Each block sums one block row; each thread sums one row. 
    On input (for 3 blocks):
            [ (A11*x1 + A12*x2 + A13*x3)   ---                   ---      ]
    work =  [ (A12^H*x1)                   (A22*x2 + A23*x3)     ---      ]
            [ (A13^H*x1)                   (A23^H*x2)            (A33*x3) ]
    
    On output:
               [ (A11*x1 + A12*x2 + A13*x3)            ]
    y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3)         ]
               [ (A13^H*x1) + (A23^H*x2)   + (A33*x3)  ]
    
    Note beta*y is not included here; see magmablas_chemv_mgpu_sync.
    
    The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
    
                  [ *                 ]  blk=0  * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
    work[gpu=0] = [ *                 ]  blk=1  x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
                  [ x x *             ]  blk=2  blanks are not set
                  [ *                 ]  blk=3
                  [ x x x x *         ]  blk=4
    
                  [ *                 ]  blk=0
    work[gpu=1] = [ x *               ]  blk=1
                  [ *                 ]  blk=2
                  [ x x x *           ]  blk=3
                  [ *                 ]  blk=4
    
    On output, rows across are summed up.
    Entries right of the diagonal blocks are not accessed.
    There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
    
               [ *                 ]
    y[gpu=0] = [ *                 ]
               [ x + x + *         ]
               [ *                 ]
               [ x + x + x + x + * ]
    
               [ *                 ]
    y[gpu=1] = [ x + *             ]
               [ *                 ]
               [ x + x + x + *     ]
               [ *                 ]
********************************************************************/
__global__ void
chemv_kernel_U_mgpu_sum(
    int n,
    magmaFloatComplex alpha,
    int lda,
    magmaFloatComplex       * __restrict__ y, int incy,
    magmaFloatComplex const * __restrict__ work,
    int my_gpu_id,
    int ngpu,
    int block_offset)
{
    int tx      = threadIdx.x;
    int blk     = blockIdx.x;
    int blk_ind = blk * NB_X;
    int ind     = blk_ind + tx;
    
    // Don't write outside [block_offset, ..., n+block_offset)
    if ( ind >= block_offset && ind < n+block_offset ) {
        magmaFloatComplex Ax = MAGMA_C_ZERO;
        work += ind;
        // if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
        // else only block j=blk contains data.
        int first = 0;
        if ( blk % ngpu != my_gpu_id ) {
            first = blk;
        }
        for(int j = first; j <= blk; ++j) {
            Ax += work[j*lda];
        }
        y[ind * incy] = alpha*Ax;  // see magmablas_chemv_sync for beta*y
    }
}
// end chemv_kernel_U_mgpu_sum
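A minimal host-side reference for the reduction performed by chemv_kernel_U_mgpu_sum is sketched below. It is an illustrative assumption rather than MAGMA code: it treats magmaFloatComplex as cuFloatComplex (its CUDA typedef), reuses NB_X from the surrounding file, and picks the block count only for the sketch.

#include <cuComplex.h>

// Host sketch of the per-GPU summation (assumed names: NB_X from the file above).
static void chemv_U_mgpu_sum_reference(
    int n, cuFloatComplex alpha, int lda,
    cuFloatComplex *y, int incy,
    const cuFloatComplex *work,
    int my_gpu_id, int ngpu, int block_offset)
{
    int nblocks = (n + block_offset + NB_X - 1) / NB_X;   // assumed grid size
    for (int blk = 0; blk < nblocks; ++blk) {
        for (int tx = 0; tx < NB_X; ++tx) {
            int ind = blk*NB_X + tx;
            if (ind < block_offset || ind >= n + block_offset) continue;
            // Block column blk holds partial sums in rows first..blk:
            // all rows if this GPU owns the column, otherwise only row blk
            // (see the workspace diagram above).
            int first = (blk % ngpu == my_gpu_id) ? 0 : blk;
            cuFloatComplex Ax = make_cuFloatComplex(0.f, 0.f);
            for (int j = first; j <= blk; ++j)
                Ax = cuCaddf(Ax, work[ind + j*lda]);
            y[ind*incy] = cuCmulf(alpha, Ax);   // beta*y is applied elsewhere
        }
    }
}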
b1a4fda8eb7abd7fc25553e59b2b8177f26aae41.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kernel_hadamard_sum(int N, double *y, double *x, double *w)
{
    unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
    /* make sure to use only N threads */
    if (tid < N) {
        y[tid] += x[tid]*w[tid];
    }
}
b1a4fda8eb7abd7fc25553e59b2b8177f26aae41.cu
#include "includes.h" __global__ void kernel_hadamard_sum(int N, double *y, double *x, double *w){ unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; /* make sure to use only N threads */ if (tid<N) { y[tid]+=x[tid]*w[tid]; } }
7dcf9e49cf0f2f438bbecd0c568d8557d0f52ed6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_constants.h> #include "BC.h" const int numYPerStep = 8; const int SIDE = 32; /** * Calculates the next finite difference step given a * grid point and step lengths. * * @param curr Pointer to the grid point that should be updated. * @param width Number of grid points in the x dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. * @returns Grid value of next timestep. */ template<int order> __device__ float Stencil(const float* curr, int width, float xcfl, float ycfl) { switch(order) { case 2: return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) + ycfl * (curr[width] + curr[-width] - 2.f * curr[0]); case 4: return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0] + 16.f * curr[-1] - curr[-2]) + ycfl * (- curr[2 * width] + 16.f * curr[width] - 30.f * curr[0] + 16.f * curr[-width] - curr[-2 * width]); case 8: return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3] - 1008.f * curr[2] + 8064.f * curr[1] - 14350.f * curr[0] + 8064.f * curr[-1] - 1008.f * curr[-2] + 128.f * curr[-3] - 9.f * curr[-4]) + ycfl * (-9.f * curr[4 * width] + 128.f * curr[3 * width] - 1008.f * curr[2 * width] + 8064.f * curr[width] - 14350.f * curr[0] + 8064.f * curr[-width] - 1008.f * curr[-2 * width] + 128.f * curr[-3 * width] - 9.f * curr[-4 * width]); default: printf("ERROR: Order %d not supported", order); return CUDART_NAN_F; } } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be very simple and only use global memory * and 1d threads and blocks. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundary). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order> __global__ void gpuStencilGlobal(float* next, const float* __restrict__ curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO int tid = blockIdx.x * blockDim.x + threadIdx.x; int borderSize = order/2; if(tid< nx*ny){ int tidx = tid%nx + borderSize; int tidy = tid/nx + borderSize; int index = tidx + gx*tidy; next[index]=Stencil<order>(curr+index, gx, xcfl, ycfl); } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilGlobal kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationGlobal(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int order = params.order(); int numthreads = 512; int numblocks = (nx*ny+numthreads-1)/numthreads; event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. 
switch (order) { case 2: hipLaunchKernelGGL(( gpuStencilGlobal<2>), dim3(numblocks), dim3(numthreads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuStencilGlobal<4>), dim3(numblocks), dim3(numthreads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuStencilGlobal<8>), dim3(numblocks), dim3(numthreads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); // if (i==0) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("0000.csv"); // } // if (i==1000) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("1000.csv"); // } // if (i==2000) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("2000.csv"); // } } check_launch("gpuStencilGlobal"); return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread * should calculate at most numYPerStep updates. It should still only use * global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundary). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order, int numYPerStep> __global__ void gpuStencilBlock(float* next, const float* __restrict__ curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO int borderSize = order/2; int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; if(tid_x<nx){ for(int y = tid_y*numYPerStep; y<(tid_y+1)*numYPerStep; y++){ if( y < ny ){ int index = tid_x + borderSize + (y+borderSize)*gx; next[index]=Stencil<order>(curr+index, gx, xcfl, ycfl); } } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilBlock kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationBlock(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int order = params.order(); int thread_num = 512; int thread_x = 64; int thread_y = (thread_num + thread_x -1)/thread_x; dim3 threads(thread_x, thread_y); int block_x = (nx+threads.x-1)/threads.x; int block_y = (ny+threads.y*numYPerStep-1)/(threads.y*numYPerStep); dim3 blocks(block_x, block_y); event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. 
switch (order) { case 2: hipLaunchKernelGGL(( gpuStencilBlock<2,numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuStencilBlock<4,numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuStencilBlock<8,numYPerStep>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); } check_launch("gpuStencilBlock"); return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size side * side using shared memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param gy Number of grid points in the y dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int side, int order> __global__ void gpuStencilShared(float* next, const float* __restrict__ curr, int gx, int gy, float xcfl, float ycfl) { // TODO const int warp_id = threadIdx.y; const int lane = threadIdx.x; __shared__ float block[side*side]; int borderSize = order/2; int tid_x = blockIdx.x * (side-order) + threadIdx.x; int tid_y = blockIdx.y * (side-order) + threadIdx.y; int b_index = lane + warp_id*side; int g_index = tid_x + tid_y*gx; for (int i = 0; i < side/ order; ++i){ if ((tid_x< gx) && ((tid_y+i*order) < gy)) { block[b_index+i*order*side] = curr[g_index+i*order*gx]; } } __syncthreads(); for(int i = 0; i < side/ order; ++i) { if(( tid_x< gx-borderSize) && (lane >= borderSize) && (lane < side -borderSize) && ( (tid_y + i*order)< gy-borderSize) && ((warp_id+i*order) < side - borderSize) && ( (warp_id+i*order) >= borderSize) ) { next[g_index+i*order*gx] = Stencil<order>(block+b_index+i*order*side, side, xcfl, ycfl);; } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilShared kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ template<int order> double gpuComputationShared(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); //TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int gy = params.gy(); //block side*side int thread_x = SIDE; int thread_y = params.order(); dim3 threads(thread_x, thread_y); int blocks_x = (gx+thread_x-order-1)/(thread_x-order); int blocks_y = (gy+SIDE-order-1)/(SIDE-order); dim3 blocks(blocks_x, blocks_y); event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. 
switch (order) { case 2: hipLaunchKernelGGL(( gpuStencilShared<SIDE,2>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, gy, xcfl, ycfl); break; case 4: hipLaunchKernelGGL(( gpuStencilShared<SIDE,4>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl); break; case 8: hipLaunchKernelGGL(( gpuStencilShared<SIDE,8>), dim3(blocks), dim3(threads), 0, 0, next_grid.dGrid_, curr_grid.dGrid_,gx, gy, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); } check_launch("gpuStencilShared"); return stop_timer(&timer); }
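The order-2 update applied by Stencil<2> above can be spot-checked against a plain CPU loop. The sketch below is added for illustration (it is not part of the assignment code) and mirrors the interior-point indexing used by gpuStencilGlobal, where the boundary ring has width order/2.

void cpu_stencil_order2(float *next, const float *curr,
                        int gx, int nx, int ny, float xcfl, float ycfl)
{
    // Interior points only: the boundary ring of width order/2 = 1 is skipped,
    // matching the nx/ny convention documented for the kernels above.
    for (int y = 0; y < ny; ++y) {
        for (int x = 0; x < nx; ++x) {
            int i = (x + 1) + (y + 1) * gx;
            next[i] = curr[i]
                    + xcfl * (curr[i - 1]  + curr[i + 1]  - 2.f * curr[i])
                    + ycfl * (curr[i - gx] + curr[i + gx] - 2.f * curr[i]);
        }
    }
}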
7dcf9e49cf0f2f438bbecd0c568d8557d0f52ed6.cu
#include <math_constants.h> #include "BC.h" const int numYPerStep = 8; const int SIDE = 32; /** * Calculates the next finite difference step given a * grid point and step lengths. * * @param curr Pointer to the grid point that should be updated. * @param width Number of grid points in the x dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. * @returns Grid value of next timestep. */ template<int order> __device__ float Stencil(const float* curr, int width, float xcfl, float ycfl) { switch(order) { case 2: return curr[0] + xcfl * (curr[-1] + curr[1] - 2.f * curr[0]) + ycfl * (curr[width] + curr[-width] - 2.f * curr[0]); case 4: return curr[0] + xcfl * (-curr[2] + 16.f * curr[1] - 30.f * curr[0] + 16.f * curr[-1] - curr[-2]) + ycfl * (- curr[2 * width] + 16.f * curr[width] - 30.f * curr[0] + 16.f * curr[-width] - curr[-2 * width]); case 8: return curr[0] + xcfl * (-9.f * curr[4] + 128.f * curr[3] - 1008.f * curr[2] + 8064.f * curr[1] - 14350.f * curr[0] + 8064.f * curr[-1] - 1008.f * curr[-2] + 128.f * curr[-3] - 9.f * curr[-4]) + ycfl * (-9.f * curr[4 * width] + 128.f * curr[3 * width] - 1008.f * curr[2 * width] + 8064.f * curr[width] - 14350.f * curr[0] + 8064.f * curr[-width] - 1008.f * curr[-2 * width] + 128.f * curr[-3 * width] - 9.f * curr[-4 * width]); default: printf("ERROR: Order %d not supported", order); return CUDART_NAN_F; } } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be very simple and only use global memory * and 1d threads and blocks. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundary). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order> __global__ void gpuStencilGlobal(float* next, const float* __restrict__ curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO int tid = blockIdx.x * blockDim.x + threadIdx.x; int borderSize = order/2; if(tid< nx*ny){ int tidx = tid%nx + borderSize; int tidy = tid/nx + borderSize; int index = tidx + gx*tidy; next[index]=Stencil<order>(curr+index, gx, xcfl, ycfl); } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilGlobal kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationGlobal(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int order = params.order(); int numthreads = 512; int numblocks = (nx*ny+numthreads-1)/numthreads; event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. 
switch (order) { case 2: gpuStencilGlobal<2><<<numblocks, numthreads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; case 4: gpuStencilGlobal<4><<<numblocks, numthreads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: gpuStencilGlobal<8><<<numblocks, numthreads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); // if (i==0) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("0000.csv"); // } // if (i==1000) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("1000.csv"); // } // if (i==2000) // { // curr_grid.fromGPU(); // curr_grid.saveStateToFile("2000.csv"); // } } check_launch("gpuStencilGlobal"); return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size (blockDim.y * numYPerStep) * blockDim.x. Each thread * should calculate at most numYPerStep updates. It should still only use * global memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param nx Number of grid points in the x dimension to which the full * stencil can be applied (ie the number of points that are at least * order/2 grid points away from the boundary). * @param ny Number of grid points in the y dimension to which th full * stencil can be applied. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int order, int numYPerStep> __global__ void gpuStencilBlock(float* next, const float* __restrict__ curr, int gx, int nx, int ny, float xcfl, float ycfl) { // TODO int borderSize = order/2; int tid_x = blockIdx.x * blockDim.x + threadIdx.x; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; if(tid_x<nx){ for(int y = tid_y*numYPerStep; y<(tid_y+1)*numYPerStep; y++){ if( y < ny ){ int index = tid_x + borderSize + (y+borderSize)*gx; next[index]=Stencil<order>(curr+index, gx, xcfl, ycfl); } } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilBlock kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ double gpuComputationBlock(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); // TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int order = params.order(); int thread_num = 512; int thread_x = 64; int thread_y = (thread_num + thread_x -1)/thread_x; dim3 threads(thread_x, thread_y); int block_x = (nx+threads.x-1)/threads.x; int block_y = (ny+threads.y*numYPerStep-1)/(threads.y*numYPerStep); dim3 blocks(block_x, block_y); event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. 
switch (order) { case 2: gpuStencilBlock<2,numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; case 4: gpuStencilBlock<4,numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, nx, ny, xcfl, ycfl); break; case 8: gpuStencilBlock<8,numYPerStep><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, nx, ny, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); } check_launch("gpuStencilBlock"); return stop_timer(&timer); } /** * Kernel to propagate finite difference grid from the current * time point to the next. * * This kernel should be optimized to compute finite difference updates * in blocks of size side * side using shared memory. * * @param next[out] Next grid state. * @param curr Current grid state. * @param gx Number of grid points in the x dimension. * @param gy Number of grid points in the y dimension. * @param xcfl Courant number for x dimension. * @param ycfl Courant number for y dimension. */ template<int side, int order> __global__ void gpuStencilShared(float* next, const float* __restrict__ curr, int gx, int gy, float xcfl, float ycfl) { // TODO const int warp_id = threadIdx.y; const int lane = threadIdx.x; __shared__ float block[side*side]; int borderSize = order/2; int tid_x = blockIdx.x * (side-order) + threadIdx.x; int tid_y = blockIdx.y * (side-order) + threadIdx.y; int b_index = lane + warp_id*side; int g_index = tid_x + tid_y*gx; for (int i = 0; i < side/ order; ++i){ if ((tid_x< gx) && ((tid_y+i*order) < gy)) { block[b_index+i*order*side] = curr[g_index+i*order*gx]; } } __syncthreads(); for(int i = 0; i < side/ order; ++i) { if(( tid_x< gx-borderSize) && (lane >= borderSize) && (lane < side -borderSize) && ( (tid_y + i*order)< gy-borderSize) && ((warp_id+i*order) < side - borderSize) && ( (warp_id+i*order) >= borderSize) ) { next[g_index+i*order*gx] = Stencil<order>(block+b_index+i*order*side, side, xcfl, ycfl);; } } } /** * Propagates the finite difference 2D heat diffusion solver * using the gpuStencilShared kernel. * * Use this function to do necessary setup and propagate params.iters() * number of times. * * @param curr_grid The current state of the grid. * @param params Parameters for the finite difference computation. * @returns Time required for computation. */ template<int order> double gpuComputationShared(Grid& curr_grid, const simParams& params) { boundary_conditions BC(params); Grid next_grid(curr_grid); //TODO: Declare variables/Compute parameters. float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int gy = params.gy(); //block side*side int thread_x = SIDE; int thread_y = params.order(); dim3 threads(thread_x, thread_y); int blocks_x = (gx+thread_x-order-1)/(thread_x-order); int blocks_y = (gy+SIDE-order-1)/(SIDE-order); dim3 blocks(blocks_x, blocks_y); event_pair timer; start_timer(&timer); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only BC.updateBC(next_grid.dGrid_, curr_grid.dGrid_); // TODO: Apply stencil. switch (order) { case 2: gpuStencilShared<SIDE,2><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, gy, xcfl, ycfl); break; case 4: gpuStencilShared<SIDE,4><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_, gx, gy, xcfl, ycfl); break; case 8: gpuStencilShared<SIDE,8><<<blocks, threads>>>(next_grid.dGrid_, curr_grid.dGrid_,gx, gy, xcfl, ycfl); break; } Grid::swap(curr_grid, next_grid); } check_launch("gpuStencilShared"); return stop_timer(&timer); }
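As a worked example of the launch geometry in gpuComputationBlock above, the sketch below plugs in illustrative sizes (nx = ny = 1000 is an assumption): with thread_x = 64 and a 512-thread block, thread_y = 8, and since each thread writes numYPerStep = 8 rows, one block covers a 64 x 64 tile of interior points.

#include <cstdio>

void block_launch_geometry_example()
{
    const int nx = 1000, ny = 1000;          // assumed interior grid size
    const int thread_x = 64;
    const int thread_y = 512 / thread_x;     // = 8
    dim3 threads(thread_x, thread_y);
    dim3 blocks((nx + thread_x - 1) / thread_x,                                // = 16
                (ny + thread_y * numYPerStep - 1) / (thread_y * numYPerStep)); // = 16
    printf("grid %u x %u of %u x %u threads\n",
           blocks.x, blocks.y, threads.x, threads.y);
}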
bff17fe3a6afe456cd930f38b63b93093e69f2f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "TSDFVolume.hpp" #include "GPURaycaster.hpp" #include "cuda_utilities.cuh" #include "TSDFUtilities.hpp" #include "PngUtilities.hpp" #include "RenderUtilitiesGPU.hpp" #include <iostream> #include <cstdio> #include <Eigen/Core> #include <math_constants.h> #include <hiprand/hiprand_kernel.h> /** * Compute a direction vector, in world coordinates, from cam centre * through the given pixel. * @param origin The camera position in world coordinates * @param pix_x The x pixel coordinate * @param pix_y The y pixel coordinate * @param rot the camera pose rotation matrix * @param kinv THe camera intrinsics inverse matrix * @return The unit direction vector for the ray in world coordinate space */ __device__ float3 compute_ray_direction_at_pixel(const float3 &origin, float pix_x, float pix_y, const Mat33 &rot, const Mat33 &kinv) { // Get point at depth 1mm. This is the direction vector in cam coords float3 ray_in_cam_space{ pix_x * kinv.m11 + pix_y * kinv.m12 + kinv.m13, pix_x * kinv.m21 + pix_y * kinv.m22 + kinv.m23, pix_x * kinv.m31 + pix_y * kinv.m32 + kinv.m33 }; // Convert this vector to world coordinate frame // We'd normally add in the camera origin but since we // want the direction, we'd deduct the camera origin // at the very next step so we'll just do the rotation instead. float3 ray_in_world_space = m3_f3_mul(rot, ray_in_cam_space); // Convert to unit vector f3_normalise(ray_in_world_space); return ray_in_world_space; } /** * Perform trilinear interpolation of the TSDF value at a given point in volume space * @param point The point (0,0,0) -> (max_x, max_y, max_z) * @param voxel_grid_size The size of the space in voxels * @param tsdf_values An array of max_x*max_y*max_z floats being the values in the space * @return The interpolated TSDF value */ __device__ float trilinearly_interpolate(const float3 point, const dim3 voxel_grid_size, const float3 voxel_size, const float *tsdf_values) { // Manage boundary points float3 max_values{ voxel_grid_size.x * voxel_size.x, voxel_grid_size.y * voxel_size.y, voxel_grid_size.z * voxel_size.z }; float3 adjusted_point = point; if (point.x >= max_values.x) adjusted_point.x = max_values.x - (voxel_size.x / 10.0f); if (point.y >= max_values.y) adjusted_point.y = max_values.y - (voxel_size.y / 10.0f); if (point.z >= max_values.z) adjusted_point.z = max_values.z - (voxel_size.z / 10.0f); if (point.x < 0.0f) adjusted_point.x = 0.0f; if (point.y < 0.0f) adjusted_point.y = 0.0f; if (point.z < 0.0f) adjusted_point.z = 0.0f; // Get the voxel containing this point int3 voxel = voxel_for_point(adjusted_point, voxel_size); // Handle voxel out of bounds if (voxel.x < 0 || voxel.y < 0 || voxel.z < 0 || voxel.x >= voxel_grid_size.x || voxel.y >= voxel_grid_size.y || voxel.z >= voxel_grid_size.z) { printf("Point outside of voxel space %f, %f, %f\n", adjusted_point.x, adjusted_point.y, adjusted_point.z); return CUDART_NAN_F; } // Get the centre of the voxel float3 v_centre = centre_of_voxel_at(voxel.x, voxel.y, voxel.z, voxel_size); // Set up the lower bound for trilinear interpolation int3 lower; lower.x = (point.x < v_centre.x) ? voxel.x - 1 : voxel.x; lower.y = (point.y < v_centre.y) ? voxel.y - 1 : voxel.y; lower.z = (point.z < v_centre.z) ? 
voxel.z - 1 : voxel.z; // Handle lower out of bounds lower.x = max(lower.x, 0); lower.y = max(lower.y, 0); lower.z = max(lower.z, 0); // Compute u,v,w float3 lower_centre = centre_of_voxel_at(lower.x, lower.y, lower.z, voxel_size); float3 uvw = f3_sub(point, lower_centre); uvw = f3_div_elem(uvw, voxel_size); float u = uvw.x; float v = uvw.y; float w = uvw.z; // Lookup c000 - c111 float c000 = tsdf_value_at(lower.x + 0, lower.y + 0, lower.z + 0, tsdf_values, voxel_grid_size); float c001 = tsdf_value_at(lower.x + 0, lower.y + 0, lower.z + 1, tsdf_values, voxel_grid_size); float c010 = tsdf_value_at(lower.x + 0, lower.y + 1, lower.z + 0, tsdf_values, voxel_grid_size); float c011 = tsdf_value_at(lower.x + 0, lower.y + 1, lower.z + 1, tsdf_values, voxel_grid_size); float c100 = tsdf_value_at(lower.x + 1, lower.y + 0, lower.z + 0, tsdf_values, voxel_grid_size); float c101 = tsdf_value_at(lower.x + 1, lower.y + 0, lower.z + 1, tsdf_values, voxel_grid_size); float c110 = tsdf_value_at(lower.x + 1, lower.y + 1, lower.z + 0, tsdf_values, voxel_grid_size); float c111 = tsdf_value_at(lower.x + 1, lower.y + 1, lower.z + 1, tsdf_values, voxel_grid_size); float interpolated = c000 * (1 - u) * (1 - v) * (1 - w) + c001 * (1 - u) * (1 - v) * w + c010 * (1 - u) * v * (1 - w) + c011 * (1 - u) * v * w + c100 * u * (1 - v) * (1 - w) + c101 * u * (1 - v) * w + c110 * u * v * (1 - w) + c111 * u * v * w; return interpolated; } /** * For a given dimension check whether the ray can intersect the volume * If this method returns true, it doesn't mean tha the ray _does_ intersect, just that it could * @param space_min The minimum bound of the voxel space in the current dimension * @param space_max The maximum bound of the voxel space in the current dimension * @param origin The starting point of the ray in the current dimension * @param direction The direction of the ray in the current dimension * @param near_t The nearest point of intersection of the voxel space which may be updated in this call * @param far_t The furthest point of intersection of the voxel space which may be updated in this call * @return true if an intersection is still possible otherwise false */ __device__ __forceinline__ bool can_intersect_in_dimension(float space_min, float space_max, float origin, float direction, float &near_t, float &far_t) { bool can_intersect = true; if (direction == 0) { // Not moving in this direction so we must be within bounds in order to have any intersection at all if (origin < space_min || origin > space_max) { can_intersect = false; } } else { // compute intersection distance of the planes float distance_to_space_min = (space_min - origin) / direction; float distance_to_space_max = (space_max - origin) / direction; // If distance_to_space_min > distance_to_space_max swap (distance_to_space_min, distance_to_space_max) since distance_to_space_min intersection with near plane if (distance_to_space_min > distance_to_space_max) { float temp_t = distance_to_space_min; distance_to_space_min = distance_to_space_max; distance_to_space_max = temp_t; } // if distance_to_space_min > t_near set t_near = distance_to_space_min : We want largest t_near if (distance_to_space_min > near_t) { near_t = distance_to_space_min; } //If distance_to_space_max < t_far set t_far="distance_to_space_max" want smallest t_far if (distance_to_space_max < far_t) { far_t = distance_to_space_max; } // If Tnear > Tfar box is missed so return false if (near_t > far_t) { can_intersect = false; } else { // If Tfar < 0 box is behind ray return false end if 
(far_t < 0) { can_intersect = false; } } } return can_intersect; } /** * Compute the intersections of the ray from origin in direction through the cube specified by space_min and space_max * There are three cases: * origin is inside the cube near_t =0, far_t is distance to exit and we return true * origin is outside of cube and ray misses it near_t and far_t are undefined, return value is false * origin is outside of cube and ray penetrates. near_t and far_t are defined (should have the same sign) and return value is true * @param origin The source of the ray in world coordinates * @param direction a Vector representing the direction of the ray in world coordinates * @param space_min The lower, leftmost, frontmost vertex of the voxel space in world coordinates * @param space_mac The upper, rightmost, rearmost vertex of the voxel space in world coordinates * @param near_t Populated with the nearpoint if return is true * @param far_t populated with the far point if return is true * @return true if the ray pierces the volme */ __device__ bool compute_near_and_far_t(const float3 &origin, const float3 &direction, const float3 &space_min, const float3 &space_max, float &near_t, float &far_t) { bool intersects = false; // Handle start_point in space if (origin.x >= space_min.x && origin.x <= space_max.x && origin.y >= space_min.y && origin.y <= space_max.y && origin.z >= space_min.z && origin.z <= space_max.z) { // Near_t is zero as origin is inside the space near_t = 0; float x_t = CUDART_NAN_F; float y_t = CUDART_NAN_F; float z_t = CUDART_NAN_F; if (direction.x > 0) { x_t = (space_max.x - origin.x) / direction.x; } else if (direction.x < 0) { x_t = (space_min.x - origin.x) / direction.x; } if (direction.y > 0) { y_t = (space_max.y - origin.y) / direction.y; } else if (direction.y < 0) { y_t = (space_min.y - origin.y) / direction.y; } if (direction.z > 0) { z_t = (space_max.z - origin.z) / direction.z; } else if (direction.z < 0) { z_t = (space_min.z - origin.z) / direction.z; } if (x_t < y_t) { if (x_t < z_t) far_t = x_t; else far_t = z_t; } else { if (y_t < z_t) far_t = y_t; else far_t = z_t; } intersects = true; } else { // Outside of the voxel space just now near_t = -CUDART_INF_F; far_t = CUDART_INF_F; // Consider X if ((can_intersect_in_dimension(space_min.x, space_max.x, origin.x, direction.x, near_t, far_t)) && (can_intersect_in_dimension(space_min.y, space_max.y, origin.y, direction.y, near_t, far_t)) && (can_intersect_in_dimension(space_min.z, space_max.z, origin.z, direction.z, near_t, far_t))) { intersects = true; } } // If we got here then return intersects; } /** * Walk a ray from the camera onto the TSDF determining where (if at all) it * intersects the ISO surface defined by the TSDF * @param origin The position of the camera in world coordinates * @param rot The camera pose rotation matrix in world terms * @param kinv The camera's intrinsic matrix inverse * @param space_min The lowest X,Y,Z in world coordinates occupied by the voxel space * @param space_max The highest X,Y,Z in world coordinates occupied by the voxel space * @param voxel_grid_size Dimension of the voxel space in each direction in voxels * @param voxel_size Dimensions of each voxel * @param tsdf_values A pointer to the TSDF distance values with Z,Y,X coords major * @param vertex The 3D world coordinate of the vertex intersected by this ray if it exists or {NaN, NaN, NaN} */ __global__ void process_ray(const float3 origin, const Mat33 rot, const Mat33 kinv, uint16_t width, uint16_t height, const float 
trunc_distance, const float3 space_min, const float3 space_max, const dim3 voxel_grid_size, const float3 voxel_size, const float *tsdf_values, float3 *vertices) { int imx = threadIdx.x + blockIdx.x * blockDim.x; int imy = threadIdx.y + blockIdx.y * blockDim.y; // Terminate early if the index is out of bounds if (imx >= width || imy >= height) { return; } size_t idx = imy * width + imx; // setup offsets auto offset_x = 0.; auto offset_y = 0.; if (gridDim.z > 1 && blockIdx.z > 0) { auto pi = 3.14159265359; auto angle = pi / (gridDim.z - 1) * (blockIdx.z - 1); offset_x = cos(angle) * log((float) gridDim.z - 1); offset_y = sin(angle) * log((float) gridDim.z - 1); } // Compute the ray direction for this pixel in world coordinates // which is R K-1 (x,y,1)T float3 direction = compute_ray_direction_at_pixel(origin, imx + offset_x, imy + offset_y, rot, kinv); // Compute near and far intersections of the TSDF volume by this ray // near and far T are valules of the parameter origin + t*(direction.unit) at which the // ray from the camera through the point intersects the voxel space. // near_t may be zero if origin is inside the voxel space // far_t must be > near_t float near_t, far_t; bool intersects = compute_near_and_far_t(origin, direction, space_min, space_max, near_t, far_t); // Only do this if the ray intersects space float3 intersection_point{CUDART_NAN_F, CUDART_NAN_F, CUDART_NAN_F}; if (intersects) { // Compute the start point in grid coords float3 start_point = f3_sub(f3_add(origin, f3_mul_scalar(near_t, direction)), space_min); bool done = false; // Initialise TSDF to trun distance float tsdf = trunc_distance; float previous_tsdf; // Set up current point to iterate float t = 0; float max_t = far_t - near_t; // Iterate until // We leave the voxel space (fail) // We transit from +ve to -ve tsdf (intersect) // We transit from -ve to +ve (fail) int count = 0; float step_size = trunc_distance * 0.05; while (!done) { float3 current_point = f3_add(start_point, f3_mul_scalar(t, direction)); // Save last TSDF (to check against crossing surface) previous_tsdf = tsdf; // Extract the tsdf float tsdf = trilinearly_interpolate(current_point, voxel_grid_size, voxel_size, tsdf_values); // If tsdf is negative then we're behind the surface else we're on it if (tsdf <= 0) { // If we stepped past the iso surface, work out when if (tsdf < 0) { // We just advanced by step_size so step back t = t - step_size; // Linearly interpolate the crossing point t = t + (previous_tsdf / (previous_tsdf - tsdf)) * step_size; } // Compute the point of intersection current_point = f3_add(start_point, f3_mul_scalar(t, direction)); // Put into world coordinates intersection_point = f3_add(space_min, current_point); done = true; } // tsdf is +ve, if previous tsdf was negative then we hit a backface and we're done else if (previous_tsdf < 0) { done = true; } // previous tsdf was +ve and so is this one. We've not crossed the surface // so keep stepping else { t = t + step_size; // Until we step out of the volume if (t >= max_t) { done = true; } } // Catch failures - this code shouldn';'t be invoked. if (count++ > 4400) { printf("Timed out @(%d,%d) with t:%f tsdf:%f\n", imx, imy, t, tsdf); done = true; } } } intersection_point = f3_mul_scalar(1. 
/ gridDim.z, intersection_point); atomicAdd(&vertices[idx].x, intersection_point.x); atomicAdd(&vertices[idx].y, intersection_point.y); atomicAdd(&vertices[idx].z, intersection_point.z); } /** * Compute normals to the vertex data provided * @param width The with of the output matrix * @param height The height of the output matrix * @param vertices The input vertex array - unchanged by ths * @param normals the oputput normals array to be populated */ __global__ void compute_normals(uint16_t width, uint16_t height, const float3 *vertices, float3 *normals) { int imx = threadIdx.x + blockIdx.x * blockDim.x; int imy = threadIdx.y + blockIdx.y * blockDim.y; // Terminate eearly if the index is out of bounds if (imx >= width || imy >= height) { return; } size_t idx = (imy * width + imx); if (imy == height - 1) { normals[idx].x = 0; normals[idx].y = 0; normals[idx].z = 0; } else { if (imx == width - 1) { normals[idx].x = 0; normals[idx].y = 0; normals[idx].z = 0; } else { float3 v2{vertices[idx + 1].x - vertices[idx].x, vertices[idx + 1].y - vertices[idx].y, vertices[idx + 1].z - vertices[idx].z}; float3 v1{vertices[idx + width].x - vertices[idx].x, vertices[idx + width].y - vertices[idx].y, vertices[idx + width].z - vertices[idx].z}; float nx = v1.y * v2.z - v1.z * v2.y; float ny = v1.z * v2.x - v1.x * v2.z; float nz = v1.x * v2.y - v1.y * v2.x; float l = sqrt(nx * nx + ny * ny + nz * nz); normals[idx].x = nx / l; normals[idx].y = ny / l; normals[idx].z = nz / l; } } } /** * Compute the vertex map */ __host__ float3 *get_vertices(const TSDFVolume &volume, const Camera &camera, uint16_t width, uint16_t height, const int n_samples=1) { // Setup camera origin float3 origin = float3_from_eigen_vector(camera.position()); // Set up voxel grid size dim3 voxel_grid_size = volume.size(); float3 voxel_size = volume.voxel_size(); // Set up rotation matrices for pose and Kinv // Eigen stores data in column major format. 
const float *pose = camera.pose().data(); Mat33 rot{ pose[0], pose[1], pose[2], pose[4], pose[5], pose[6], pose[8], pose[9], pose[10] }; const float *cam_kinv = camera.kinv().data(); Mat33 kinv{ cam_kinv[0], cam_kinv[1], cam_kinv[2], cam_kinv[3], cam_kinv[4], cam_kinv[5], cam_kinv[6], cam_kinv[7], cam_kinv[8] }; // Set up world coords for min and max extremes of the voxel space float3 space_min = volume.offset(); float3 space_max = volume.offset() + volume.physical_size(); // Get reference to TSDF distance data const float *d_tsdf_values = volume.distance_data(); // Allocate storage for vertices on device hipError_t err; size_t data_size = width * height * sizeof(float3); float3 *d_vertices; err = hipMalloc(&d_vertices, data_size); check_cuda_error("Vertices alloc failed ", err); err = hipMemset(d_vertices, 0, data_size); check_cuda_error("Cant set default values for vertices ", err); // Execute the kernel dim3 block(32, 32); dim3 grid(divUp(width, block.x), divUp(height, block.y), n_samples); hipLaunchKernelGGL(( process_ray) , dim3(grid), dim3(block), 0, 0, origin, rot, kinv, width, height, volume.truncation_distance(), space_min, space_max, voxel_grid_size, voxel_size, d_tsdf_values, d_vertices); err = hipDeviceSynchronize(); check_cuda_error("process_ray failed ", err); return d_vertices; } /** * Compute the normals given the vertices and dimensions * @param width The width of the vertex data * @param height The height of the vertex data * @d_vertices Pointer to vertex data (on device) * @return An array of normals */ float3 *compute_normals(uint16_t width, uint16_t height, float3 *d_vertices) { float3 *d_normals; hipError_t err; err = hipMalloc(&d_normals, width * height * sizeof(float3)); check_cuda_error("Normals alloc failed ", err); dim3 block(32, 32); dim3 grid(divUp(width, block.x), divUp(height, block.y)); hipLaunchKernelGGL(( compute_normals) , dim3(grid), dim3(block), 0, 0, width, height, d_vertices, d_normals); check_cuda_error("compute_normals failed ", err); return d_normals; } /** * Raycast the TSDF and store discovered vertices and normals in the ubput arrays * @param volume The volume to cast * @param camera The camera * @param vertices The vertices discovered * @param normals The normals */ void GPURaycaster::raycast(const TSDFVolume &volume, const Camera &camera, Eigen::Matrix<float, 3, Eigen::Dynamic> &vertices, Eigen::Matrix<float, 3, Eigen::Dynamic> &normals) const { using namespace Eigen; // Compute vertices float3 *d_vertices = get_vertices(volume, camera, m_width, m_height); // Compute normals float3 *d_normals = compute_normals(m_width, m_height, d_vertices); // Copy vertex data back hipError_t err; vertices.resize(3, m_width * m_height); float *h_vertices = vertices.data(); err = hipMemcpy(h_vertices, d_vertices, m_width * m_height * 3 * sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Vertices Memcpy failed ", err); hipFree(d_vertices); // Copy normal data back normals.resize(3, m_width * m_height); float *h_normals = normals.data(); err = hipMemcpy(h_normals, d_normals, m_width * m_height * 3 * sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Normals Memcpy failed ", err); hipFree(d_normals); } /** * Render a depth image from a TSDF * @param volume The volume to cast * @param camera The camera * @return The DepthImage */ DepthImage *GPURaycaster::render_to_depth_image(const TSDFVolume &volume, const Camera &camera) const { using namespace Eigen; std::cout << "Rendering depth map" << std::endl; DepthImage *d = nullptr; // Compute vertices on 
device float3 *d_vertices = get_vertices(volume, camera, m_width, m_height); std::cout << " got vertices on device" << std::endl; // Copy device vertices ionto host float3 *h_vertices = new float3[m_width * m_height]; if (h_vertices) { hipMemcpy(h_vertices, d_vertices, m_width * m_height * sizeof(float3), hipMemcpyDeviceToHost); // Stash just the Z coords to a depth map uint16_t *depth_data = new uint16_t[m_width * m_height]; if (depth_data) { // Convert to and return a DepthImage for (int i = 0; i < m_width * m_height; i++) { float3 world_point = h_vertices[i]; // Eigen::Vector3f cam_point = camera.world_to_camera( Eigen::Vector3f{ world_point.x, world_point.y, world_point.z } ); // depth_data[i] = (uint16_t)roundf(cam_point.z()); depth_data[i] = (uint16_t) roundf( world_point.z * 255.99); // max depth is 1 -> multiply by 255.99 for visualization } std::cout << " making depthimage" << std::endl; d = new DepthImage(m_width, m_height, depth_data); // DEBUG // TODO: Remove me std::cout << " saving mesh depth image: REMOVE ME" << std::endl; save_png_to_file("mesh_depth_file.png", m_width, m_height, depth_data); delete[] depth_data; } else { std::cout << "Couldn't allocate depth data storage" << std::endl; } delete[] h_vertices; } else { std::cout << "Couldn't allocate host memory for vertices" << std::endl; } hipFree(d_vertices); std::cout << " done" << std::endl; return d; } void GPURaycaster::render_with_shading(const TSDFVolume &volume, const Camera &camera, Eigen::Matrix<float, 3, Eigen::Dynamic> &vertices, Eigen::Matrix<float, 3, Eigen::Dynamic> &normals, const Eigen::Vector3f &light_source, int n_samples, uint8_t *image) const { using namespace Eigen; // Compute vertices float3 *d_vertices = get_vertices(volume, camera, m_width, m_height, n_samples); // Compute normals float3 *d_normals = compute_normals(m_width, m_height, d_vertices); // shading and render render_scene(m_width, m_height, d_vertices, d_normals, light_source, image); // Copy vertex data back hipError_t err; vertices.resize(3, m_width * m_height); float *h_vertices = vertices.data(); err = hipMemcpy(h_vertices, d_vertices, m_width * m_height * 3 * sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Vertices Memcpy failed ", err); hipFree(d_vertices); // Copy normal data back normals.resize(3, m_width * m_height); float *h_normals = normals.data(); err = hipMemcpy(h_normals, d_normals, m_width * m_height * 3 * sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Normals Memcpy failed ", err); hipFree(d_normals); }
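The per-dimension test in can_intersect_in_dimension combines into the classic ray versus axis-aligned-box slab test. The host-side sketch below is an illustrative assumption (not part of the original file) and omits the origin-inside-the-volume shortcut that compute_near_and_far_t handles separately.

#include <algorithm>
#include <cmath>
#include <utility>

bool ray_box_slab_test(const float o[3], const float d[3],
                       const float bmin[3], const float bmax[3],
                       float &near_t, float &far_t)
{
    near_t = -INFINITY;
    far_t  =  INFINITY;
    for (int a = 0; a < 3; ++a) {
        if (d[a] == 0.0f) {
            // Parallel to this slab: the origin must already lie between its planes.
            if (o[a] < bmin[a] || o[a] > bmax[a]) return false;
        } else {
            float t0 = (bmin[a] - o[a]) / d[a];
            float t1 = (bmax[a] - o[a]) / d[a];
            if (t0 > t1) std::swap(t0, t1);
            near_t = std::max(near_t, t0);   // keep the largest entry distance
            far_t  = std::min(far_t,  t1);   // keep the smallest exit distance
            if (near_t > far_t || far_t < 0.0f) return false;  // missed, or box behind the ray
        }
    }
    return true;
}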
bff17fe3a6afe456cd930f38b63b93093e69f2f8.cu
#include "TSDFVolume.hpp" #include "GPURaycaster.hpp" #include "cuda_utilities.cuh" #include "TSDFUtilities.hpp" #include "PngUtilities.hpp" #include "RenderUtilitiesGPU.hpp" #include <iostream> #include <cstdio> #include <Eigen/Core> #include <math_constants.h> #include <curand_kernel.h> /** * Compute a direction vector, in world coordinates, from cam centre * through the given pixel. * @param origin The camera position in world coordinates * @param pix_x The x pixel coordinate * @param pix_y The y pixel coordinate * @param rot the camera pose rotation matrix * @param kinv THe camera intrinsics inverse matrix * @return The unit direction vector for the ray in world coordinate space */ __device__ float3 compute_ray_direction_at_pixel(const float3 &origin, float pix_x, float pix_y, const Mat33 &rot, const Mat33 &kinv) { // Get point at depth 1mm. This is the direction vector in cam coords float3 ray_in_cam_space{ pix_x * kinv.m11 + pix_y * kinv.m12 + kinv.m13, pix_x * kinv.m21 + pix_y * kinv.m22 + kinv.m23, pix_x * kinv.m31 + pix_y * kinv.m32 + kinv.m33 }; // Convert this vector to world coordinate frame // We'd normally add in the camera origin but since we // want the direction, we'd deduct the camera origin // at the very next step so we'll just do the rotation instead. float3 ray_in_world_space = m3_f3_mul(rot, ray_in_cam_space); // Convert to unit vector f3_normalise(ray_in_world_space); return ray_in_world_space; } /** * Perform trilinear interpolation of the TSDF value at a given point in volume space * @param point The point (0,0,0) -> (max_x, max_y, max_z) * @param voxel_grid_size The size of the space in voxels * @param tsdf_values An array of max_x*max_y*max_z floats being the values in the space * @return The interpolated TSDF value */ __device__ float trilinearly_interpolate(const float3 point, const dim3 voxel_grid_size, const float3 voxel_size, const float *tsdf_values) { // Manage boundary points float3 max_values{ voxel_grid_size.x * voxel_size.x, voxel_grid_size.y * voxel_size.y, voxel_grid_size.z * voxel_size.z }; float3 adjusted_point = point; if (point.x >= max_values.x) adjusted_point.x = max_values.x - (voxel_size.x / 10.0f); if (point.y >= max_values.y) adjusted_point.y = max_values.y - (voxel_size.y / 10.0f); if (point.z >= max_values.z) adjusted_point.z = max_values.z - (voxel_size.z / 10.0f); if (point.x < 0.0f) adjusted_point.x = 0.0f; if (point.y < 0.0f) adjusted_point.y = 0.0f; if (point.z < 0.0f) adjusted_point.z = 0.0f; // Get the voxel containing this point int3 voxel = voxel_for_point(adjusted_point, voxel_size); // Handle voxel out of bounds if (voxel.x < 0 || voxel.y < 0 || voxel.z < 0 || voxel.x >= voxel_grid_size.x || voxel.y >= voxel_grid_size.y || voxel.z >= voxel_grid_size.z) { printf("Point outside of voxel space %f, %f, %f\n", adjusted_point.x, adjusted_point.y, adjusted_point.z); return CUDART_NAN_F; } // Get the centre of the voxel float3 v_centre = centre_of_voxel_at(voxel.x, voxel.y, voxel.z, voxel_size); // Set up the lower bound for trilinear interpolation int3 lower; lower.x = (point.x < v_centre.x) ? voxel.x - 1 : voxel.x; lower.y = (point.y < v_centre.y) ? voxel.y - 1 : voxel.y; lower.z = (point.z < v_centre.z) ? 
voxel.z - 1 : voxel.z; // Handle lower out of bounds lower.x = max(lower.x, 0); lower.y = max(lower.y, 0); lower.z = max(lower.z, 0); // Compute u,v,w float3 lower_centre = centre_of_voxel_at(lower.x, lower.y, lower.z, voxel_size); float3 uvw = f3_sub(point, lower_centre); uvw = f3_div_elem(uvw, voxel_size); float u = uvw.x; float v = uvw.y; float w = uvw.z; // Lookup c000 - c111 float c000 = tsdf_value_at(lower.x + 0, lower.y + 0, lower.z + 0, tsdf_values, voxel_grid_size); float c001 = tsdf_value_at(lower.x + 0, lower.y + 0, lower.z + 1, tsdf_values, voxel_grid_size); float c010 = tsdf_value_at(lower.x + 0, lower.y + 1, lower.z + 0, tsdf_values, voxel_grid_size); float c011 = tsdf_value_at(lower.x + 0, lower.y + 1, lower.z + 1, tsdf_values, voxel_grid_size); float c100 = tsdf_value_at(lower.x + 1, lower.y + 0, lower.z + 0, tsdf_values, voxel_grid_size); float c101 = tsdf_value_at(lower.x + 1, lower.y + 0, lower.z + 1, tsdf_values, voxel_grid_size); float c110 = tsdf_value_at(lower.x + 1, lower.y + 1, lower.z + 0, tsdf_values, voxel_grid_size); float c111 = tsdf_value_at(lower.x + 1, lower.y + 1, lower.z + 1, tsdf_values, voxel_grid_size); float interpolated = c000 * (1 - u) * (1 - v) * (1 - w) + c001 * (1 - u) * (1 - v) * w + c010 * (1 - u) * v * (1 - w) + c011 * (1 - u) * v * w + c100 * u * (1 - v) * (1 - w) + c101 * u * (1 - v) * w + c110 * u * v * (1 - w) + c111 * u * v * w; return interpolated; } /** * For a given dimension check whether the ray can intersect the volume * If this method returns true, it doesn't mean tha the ray _does_ intersect, just that it could * @param space_min The minimum bound of the voxel space in the current dimension * @param space_max The maximum bound of the voxel space in the current dimension * @param origin The starting point of the ray in the current dimension * @param direction The direction of the ray in the current dimension * @param near_t The nearest point of intersection of the voxel space which may be updated in this call * @param far_t The furthest point of intersection of the voxel space which may be updated in this call * @return true if an intersection is still possible otherwise false */ __device__ __forceinline__ bool can_intersect_in_dimension(float space_min, float space_max, float origin, float direction, float &near_t, float &far_t) { bool can_intersect = true; if (direction == 0) { // Not moving in this direction so we must be within bounds in order to have any intersection at all if (origin < space_min || origin > space_max) { can_intersect = false; } } else { // compute intersection distance of the planes float distance_to_space_min = (space_min - origin) / direction; float distance_to_space_max = (space_max - origin) / direction; // If distance_to_space_min > distance_to_space_max swap (distance_to_space_min, distance_to_space_max) since distance_to_space_min intersection with near plane if (distance_to_space_min > distance_to_space_max) { float temp_t = distance_to_space_min; distance_to_space_min = distance_to_space_max; distance_to_space_max = temp_t; } // if distance_to_space_min > t_near set t_near = distance_to_space_min : We want largest t_near if (distance_to_space_min > near_t) { near_t = distance_to_space_min; } //If distance_to_space_max < t_far set t_far="distance_to_space_max" want smallest t_far if (distance_to_space_max < far_t) { far_t = distance_to_space_max; } // If Tnear > Tfar box is missed so return false if (near_t > far_t) { can_intersect = false; } else { // If Tfar < 0 box is behind ray return false end if 
(far_t < 0) { can_intersect = false; } } }
    return can_intersect;
}

/**
 * Compute the intersections of the ray from origin in direction through the cube specified by space_min and space_max
 * There are three cases:
 * origin is inside the cube: near_t = 0, far_t is distance to exit and we return true
 * origin is outside of cube and ray misses it: near_t and far_t are undefined, return value is false
 * origin is outside of cube and ray penetrates: near_t and far_t are defined (should have the same sign) and return value is true
 * @param origin The source of the ray in world coordinates
 * @param direction a Vector representing the direction of the ray in world coordinates
 * @param space_min The lower, leftmost, frontmost vertex of the voxel space in world coordinates
 * @param space_max The upper, rightmost, rearmost vertex of the voxel space in world coordinates
 * @param near_t Populated with the near point if return is true
 * @param far_t Populated with the far point if return is true
 * @return true if the ray pierces the volume
 */
__device__
bool compute_near_and_far_t(const float3 &origin, const float3 &direction,
                            const float3 &space_min, const float3 &space_max,
                            float &near_t, float &far_t) {
    bool intersects = false;

    // Handle start_point in space
    if (origin.x >= space_min.x && origin.x <= space_max.x &&
        origin.y >= space_min.y && origin.y <= space_max.y &&
        origin.z >= space_min.z && origin.z <= space_max.z) {
        // Near_t is zero as origin is inside the space
        near_t = 0;

        float x_t = CUDART_NAN_F;
        float y_t = CUDART_NAN_F;
        float z_t = CUDART_NAN_F;
        if (direction.x > 0) {
            x_t = (space_max.x - origin.x) / direction.x;
        } else if (direction.x < 0) {
            x_t = (space_min.x - origin.x) / direction.x;
        }
        if (direction.y > 0) {
            y_t = (space_max.y - origin.y) / direction.y;
        } else if (direction.y < 0) {
            y_t = (space_min.y - origin.y) / direction.y;
        }
        if (direction.z > 0) {
            z_t = (space_max.z - origin.z) / direction.z;
        } else if (direction.z < 0) {
            z_t = (space_min.z - origin.z) / direction.z;
        }

        if (x_t < y_t) {
            if (x_t < z_t) far_t = x_t;
            else far_t = z_t;
        } else {
            if (y_t < z_t) far_t = y_t;
            else far_t = z_t;
        }
        intersects = true;
    } else {
        // Outside of the voxel space just now
        near_t = -CUDART_INF_F;
        far_t = CUDART_INF_F;

        // Consider X
        if ((can_intersect_in_dimension(space_min.x, space_max.x, origin.x, direction.x, near_t, far_t)) &&
            (can_intersect_in_dimension(space_min.y, space_max.y, origin.y, direction.y, near_t, far_t)) &&
            (can_intersect_in_dimension(space_min.z, space_max.z, origin.z, direction.z, near_t, far_t))) {
            intersects = true;
        }
    }

    // If we got here then
    return intersects;
}

/**
 * Walk a ray from the camera onto the TSDF determining where (if at all) it
 * intersects the ISO surface defined by the TSDF
 * @param origin The position of the camera in world coordinates
 * @param rot The camera pose rotation matrix in world terms
 * @param kinv The camera's intrinsic matrix inverse
 * @param space_min The lowest X,Y,Z in world coordinates occupied by the voxel space
 * @param space_max The highest X,Y,Z in world coordinates occupied by the voxel space
 * @param voxel_grid_size Dimension of the voxel space in each direction in voxels
 * @param voxel_size Dimensions of each voxel
 * @param tsdf_values A pointer to the TSDF distance values with Z,Y,X coords major
 * @param vertices The 3D world coordinate of the vertex intersected by each ray if it exists or {NaN, NaN, NaN}
 */
__global__
void process_ray(const float3 origin, const Mat33 rot, const Mat33 kinv,
                 uint16_t width, uint16_t height,
                 const float trunc_distance,
                 const float3 space_min, const float3 space_max,
                 const dim3 voxel_grid_size, const float3 voxel_size,
                 const float *tsdf_values,
                 float3 *vertices) {
    int imx = threadIdx.x + blockIdx.x * blockDim.x;
    int imy = threadIdx.y + blockIdx.y * blockDim.y;

    // Terminate early if the index is out of bounds
    if (imx >= width || imy >= height) {
        return;
    }

    size_t idx = imy * width + imx;

    // setup offsets
    auto offset_x = 0.;
    auto offset_y = 0.;
    if (gridDim.z > 1 && blockIdx.z > 0) {
        auto pi = 3.14159265359;
        auto angle = pi / (gridDim.z - 1) * (blockIdx.z - 1);
        offset_x = cos(angle) * log((float) gridDim.z - 1);
        offset_y = sin(angle) * log((float) gridDim.z - 1);
    }

    // Compute the ray direction for this pixel in world coordinates
    // which is R K-1 (x,y,1)T
    float3 direction = compute_ray_direction_at_pixel(origin, imx + offset_x, imy + offset_y, rot, kinv);

    // Compute near and far intersections of the TSDF volume by this ray
    // near and far T are values of the parameter origin + t*(direction.unit) at which the
    // ray from the camera through the point intersects the voxel space.
    // near_t may be zero if origin is inside the voxel space
    // far_t must be > near_t
    float near_t, far_t;
    bool intersects = compute_near_and_far_t(origin, direction, space_min, space_max, near_t, far_t);

    // Only do this if the ray intersects space
    float3 intersection_point{CUDART_NAN_F, CUDART_NAN_F, CUDART_NAN_F};
    if (intersects) {
        // Compute the start point in grid coords
        float3 start_point = f3_sub(f3_add(origin, f3_mul_scalar(near_t, direction)), space_min);

        bool done = false;

        // Initialise TSDF to trunc distance
        float tsdf = trunc_distance;
        float previous_tsdf;

        // Set up current point to iterate
        float t = 0;
        float max_t = far_t - near_t;

        // Iterate until
        //   We leave the voxel space (fail)
        //   We transit from +ve to -ve tsdf (intersect)
        //   We transit from -ve to +ve (fail)
        int count = 0;
        float step_size = trunc_distance * 0.05;
        while (!done) {
            float3 current_point = f3_add(start_point, f3_mul_scalar(t, direction));

            // Save last TSDF (to check against crossing surface)
            previous_tsdf = tsdf;

            // Extract the tsdf
            tsdf = trilinearly_interpolate(current_point, voxel_grid_size, voxel_size, tsdf_values);

            // If tsdf is negative then we're behind the surface else we're on it
            if (tsdf <= 0) {
                // If we stepped past the iso surface, work out when
                if (tsdf < 0) {
                    // We just advanced by step_size so step back
                    t = t - step_size;

                    // Linearly interpolate the crossing point
                    t = t + (previous_tsdf / (previous_tsdf - tsdf)) * step_size;
                }

                // Compute the point of intersection
                current_point = f3_add(start_point, f3_mul_scalar(t, direction));

                // Put into world coordinates
                intersection_point = f3_add(space_min, current_point);
                done = true;
            }
            // tsdf is +ve, if previous tsdf was negative then we hit a backface and we're done
            else if (previous_tsdf < 0) {
                done = true;
            }
            // previous tsdf was +ve and so is this one. We've not crossed the surface
            // so keep stepping
            else {
                t = t + step_size;

                // Until we step out of the volume
                if (t >= max_t) {
                    done = true;
                }
            }

            // Catch failures - this code shouldn't be invoked.
            if (count++ > 4400) {
                printf("Timed out @(%d,%d) with t:%f tsdf:%f\n", imx, imy, t, tsdf);
                done = true;
            }
        }
    }

    intersection_point = f3_mul_scalar(1. / gridDim.z, intersection_point);
    atomicAdd(&vertices[idx].x, intersection_point.x);
    atomicAdd(&vertices[idx].y, intersection_point.y);
    atomicAdd(&vertices[idx].z, intersection_point.z);
}

/**
 * Compute normals to the vertex data provided
 * @param width The width of the output matrix
 * @param height The height of the output matrix
 * @param vertices The input vertex array - unchanged by this
 * @param normals The output normals array to be populated
 */
__global__
void compute_normals(uint16_t width, uint16_t height, const float3 *vertices, float3 *normals) {
    int imx = threadIdx.x + blockIdx.x * blockDim.x;
    int imy = threadIdx.y + blockIdx.y * blockDim.y;

    // Terminate early if the index is out of bounds
    if (imx >= width || imy >= height) {
        return;
    }

    size_t idx = (imy * width + imx);

    if (imy == height - 1) {
        normals[idx].x = 0;
        normals[idx].y = 0;
        normals[idx].z = 0;
    } else {
        if (imx == width - 1) {
            normals[idx].x = 0;
            normals[idx].y = 0;
            normals[idx].z = 0;
        } else {
            float3 v2{vertices[idx + 1].x - vertices[idx].x,
                      vertices[idx + 1].y - vertices[idx].y,
                      vertices[idx + 1].z - vertices[idx].z};
            float3 v1{vertices[idx + width].x - vertices[idx].x,
                      vertices[idx + width].y - vertices[idx].y,
                      vertices[idx + width].z - vertices[idx].z};
            float nx = v1.y * v2.z - v1.z * v2.y;
            float ny = v1.z * v2.x - v1.x * v2.z;
            float nz = v1.x * v2.y - v1.y * v2.x;
            float l = sqrt(nx * nx + ny * ny + nz * nz);
            normals[idx].x = nx / l;
            normals[idx].y = ny / l;
            normals[idx].z = nz / l;
        }
    }
}

/**
 * Compute the vertex map
 */
__host__
float3 *get_vertices(const TSDFVolume &volume, const Camera &camera, uint16_t width, uint16_t height, const int n_samples = 1) {
    // Setup camera origin
    float3 origin = float3_from_eigen_vector(camera.position());

    // Set up voxel grid size
    dim3 voxel_grid_size = volume.size();
    float3 voxel_size = volume.voxel_size();

    // Set up rotation matrices for pose and Kinv
    // Eigen stores data in column major format.
    const float *pose = camera.pose().data();
    Mat33 rot{
        pose[0], pose[1], pose[2],
        pose[4], pose[5], pose[6],
        pose[8], pose[9], pose[10]
    };

    const float *cam_kinv = camera.kinv().data();
    Mat33 kinv{
        cam_kinv[0], cam_kinv[1], cam_kinv[2],
        cam_kinv[3], cam_kinv[4], cam_kinv[5],
        cam_kinv[6], cam_kinv[7], cam_kinv[8]
    };

    // Set up world coords for min and max extremes of the voxel space
    float3 space_min = volume.offset();
    float3 space_max = volume.offset() + volume.physical_size();

    // Get reference to TSDF distance data
    const float *d_tsdf_values = volume.distance_data();

    // Allocate storage for vertices on device
    cudaError_t err;
    size_t data_size = width * height * sizeof(float3);
    float3 *d_vertices;
    err = cudaMalloc(&d_vertices, data_size);
    check_cuda_error("Vertices alloc failed ", err);
    err = cudaMemset(d_vertices, 0, data_size);
    check_cuda_error("Cant set default values for vertices ", err);

    // Execute the kernel
    dim3 block(32, 32);
    dim3 grid(divUp(width, block.x), divUp(height, block.y), n_samples);
    process_ray <<< grid, block>>>(origin, rot, kinv, width, height,
                                   volume.truncation_distance(),
                                   space_min, space_max,
                                   voxel_grid_size, voxel_size,
                                   d_tsdf_values,
                                   d_vertices);
    err = cudaDeviceSynchronize();
    check_cuda_error("process_ray failed ", err);

    return d_vertices;
}

/**
 * Compute the normals given the vertices and dimensions
 * @param width The width of the vertex data
 * @param height The height of the vertex data
 * @param d_vertices Pointer to vertex data (on device)
 * @return An array of normals
 */
float3 *compute_normals(uint16_t width, uint16_t height, float3 *d_vertices) {
    float3 *d_normals;
    cudaError_t err;
    err = cudaMalloc(&d_normals, width * height * sizeof(float3));
    check_cuda_error("Normals alloc failed ", err);

    dim3 block(32, 32);
    dim3 grid(divUp(width, block.x), divUp(height, block.y));
    compute_normals <<< grid, block>>>(width, height, d_vertices, d_normals);
    err = cudaDeviceSynchronize();
    check_cuda_error("compute_normals failed ", err);

    return d_normals;
}

/**
 * Raycast the TSDF and store discovered vertices and normals in the output arrays
 * @param volume The volume to cast
 * @param camera The camera
 * @param vertices The vertices discovered
 * @param normals The normals
 */
void GPURaycaster::raycast(const TSDFVolume &volume, const Camera &camera,
                           Eigen::Matrix<float, 3, Eigen::Dynamic> &vertices,
                           Eigen::Matrix<float, 3, Eigen::Dynamic> &normals) const {
    using namespace Eigen;

    // Compute vertices
    float3 *d_vertices = get_vertices(volume, camera, m_width, m_height);

    // Compute normals
    float3 *d_normals = compute_normals(m_width, m_height, d_vertices);

    // Copy vertex data back
    cudaError_t err;
    vertices.resize(3, m_width * m_height);
    float *h_vertices = vertices.data();
    err = cudaMemcpy(h_vertices, d_vertices, m_width * m_height * 3 * sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda_error("Vertices Memcpy failed ", err);
    cudaFree(d_vertices);

    // Copy normal data back
    normals.resize(3, m_width * m_height);
    float *h_normals = normals.data();
    err = cudaMemcpy(h_normals, d_normals, m_width * m_height * 3 * sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda_error("Normals Memcpy failed ", err);
    cudaFree(d_normals);
}

/**
 * Render a depth image from a TSDF
 * @param volume The volume to cast
 * @param camera The camera
 * @return The DepthImage
 */
DepthImage *GPURaycaster::render_to_depth_image(const TSDFVolume &volume, const Camera &camera) const {
    using namespace Eigen;

    std::cout << "Rendering depth map" << std::endl;
    DepthImage *d = nullptr;

    // Compute vertices on device
    float3 *d_vertices = get_vertices(volume, camera, m_width, m_height);
    std::cout << " got vertices on device" << std::endl;

    // Copy device vertices into host memory
    float3 *h_vertices = new float3[m_width * m_height];
    if (h_vertices) {
        cudaMemcpy(h_vertices, d_vertices, m_width * m_height * sizeof(float3), cudaMemcpyDeviceToHost);

        // Stash just the Z coords to a depth map
        uint16_t *depth_data = new uint16_t[m_width * m_height];
        if (depth_data) {
            // Convert to and return a DepthImage
            for (int i = 0; i < m_width * m_height; i++) {
                float3 world_point = h_vertices[i];
                // Eigen::Vector3f cam_point = camera.world_to_camera( Eigen::Vector3f{ world_point.x, world_point.y, world_point.z } );
                // depth_data[i] = (uint16_t)roundf(cam_point.z());
                depth_data[i] = (uint16_t) roundf( world_point.z * 255.99); // max depth is 1 -> multiply by 255.99 for visualization
            }
            std::cout << " making depthimage" << std::endl;
            d = new DepthImage(m_width, m_height, depth_data);

            // DEBUG
            // TODO: Remove me
            std::cout << " saving mesh depth image: REMOVE ME" << std::endl;
            save_png_to_file("mesh_depth_file.png", m_width, m_height, depth_data);

            delete[] depth_data;
        } else {
            std::cout << "Couldn't allocate depth data storage" << std::endl;
        }
        delete[] h_vertices;
    } else {
        std::cout << "Couldn't allocate host memory for vertices" << std::endl;
    }
    cudaFree(d_vertices);

    std::cout << " done" << std::endl;
    return d;
}

void GPURaycaster::render_with_shading(const TSDFVolume &volume, const Camera &camera,
                                       Eigen::Matrix<float, 3, Eigen::Dynamic> &vertices,
                                       Eigen::Matrix<float, 3, Eigen::Dynamic> &normals,
                                       const Eigen::Vector3f &light_source,
                                       int n_samples,
                                       uint8_t *image) const {
    using namespace Eigen;

    // Compute vertices
    float3 *d_vertices = get_vertices(volume, camera, m_width, m_height, n_samples);

    // Compute normals
    float3 *d_normals = compute_normals(m_width, m_height, d_vertices);

    // shading and render
    render_scene(m_width, m_height, d_vertices, d_normals, light_source, image);

    // Copy vertex data back
    cudaError_t err;
    vertices.resize(3, m_width * m_height);
    float *h_vertices = vertices.data();
    err = cudaMemcpy(h_vertices, d_vertices, m_width * m_height * 3 * sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda_error("Vertices Memcpy failed ", err);
    cudaFree(d_vertices);

    // Copy normal data back
    normals.resize(3, m_width * m_height);
    float *h_normals = normals.data();
    err = cudaMemcpy(h_normals, d_normals, m_width * m_height * 3 * sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda_error("Normals Memcpy failed ", err);
    cudaFree(d_normals);
}
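A note on the surface localisation in process_ray above: when the marched TSDF value first goes negative, the loop steps back by one step_size and linearly interpolates between the last positive sample and the first negative one. The snippet below is a minimal, self-contained sketch of just that interpolation; the helper name refine_crossing_t and the sample numbers are illustrative only and are not part of the raycaster source.

#include <cstdio>

// Hypothetical helper (not in the file above) showing the zero-crossing refinement:
// previous_tsdf is the last sample in front of the surface, tsdf is the first sample behind it.
static float refine_crossing_t(float t, float step_size, float previous_tsdf, float tsdf) {
    t = t - step_size;                                                // step back to the last positive sample
    return t + (previous_tsdf / (previous_tsdf - tsdf)) * step_size;  // fraction of the step where TSDF crosses zero
}

int main() {
    // Example: TSDF was +0.02 at t = 0.95 and -0.01 at t = 1.00 with step_size = 0.05;
    // the crossing is estimated two thirds of the way into the step, at t ~ 0.9833.
    printf("t_crossing = %f\n", refine_crossing_t(1.00f, 0.05f, 0.02f, -0.01f));
    return 0;
}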
30429e3e1d7e41471fe4694ad1396d25118e084e.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file geo_app.cu * * @brief Geolocation Application */ #include <gunrock/gunrock.h> #include <gunrock/util/test_utils.cuh> #include <gunrock/graphio/graphio.cuh> #include <gunrock/graphio/labels.cuh> #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> #include <gunrock/app/geo/geo_enactor.cuh> #include <gunrock/app/geo/geo_test.cuh> namespace gunrock { namespace app { namespace geo { hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<int>( "geo-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Number of iterations geolocation should run for (default=3).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "spatial-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1000, "Number of maximum iterations spatial median " "kernel should run for (default=1000).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "geo-complete", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Run geolocation application until all locations for all nodes are " "found, uses an atomic (default=false).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "labels-file", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "User locations label file for geolocation app.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "debug", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Debug label values, this prints out the entire labels array (longitude, " "latitude).", __FILE__, __LINE__)); return retval; } /** * @brief Run geolocation tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return hipError_t error message(s), if any */ template <typename GraphT, typename ArrayT> hipError_t RunTests(util::Parameters &parameters, GraphT &graph, ArrayT &h_latitude, ArrayT &h_longitude, ArrayT &ref_predicted_lat, ArrayT &ref_predicted_lon, util::Location target) { hipError_t retval = hipSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); int geo_iter = parameters.Get<int>("geo-iter"); int spatial_iter = parameters.Get<int>("spatial-iter"); util::PrintMsg("Number of iterations: " + std::to_string(geo_iter), !quiet_mode); util::Info info("geolocation", parameters, graph); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // Allocate problem specific host data array to // extract device values to host ValueT *h_predicted_lat = new ValueT[graph.nodes]; ValueT *h_predicted_lon = new ValueT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; util::PrintMsg("Initializing problem ... ", !quiet_mode); GUARD_CU(problem.Init(graph, target)); util::PrintMsg("Initializing enactor ... ", !quiet_mode); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(h_latitude.GetPointer(util::HOST), h_longitude.GetPointer(util::HOST), geo_iter, spatial_iter, target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon)); SizeT num_errors = Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon, ref_predicted_lat, ref_predicted_lon, false); } } cpu_timer.Start(); // Extract problem data GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon, ref_predicted_lat, ref_predicted_lon, false); } // compute running statistics info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace geo } // namespace app } // namespace gunrock // =========================================================================================== // ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS // ======================= // =========================================================================================== // /* // * @brief Entry of gunrock_template function // 
* @tparam GraphT Type of the graph // * @tparam ValueT Type of the distances // * @param[in] parameters Excution parameters // * @param[in] graph Input graph // * @param[out] distances Return shortest distance to source per vertex // * @param[out] preds Return predecessors of each vertex // * \return double Return accumulated elapsed times for all runs // */ // template <typename GraphT, typename ValueT = typename GraphT::ValueT> // double gunrock_Template( // gunrock::util::Parameters &parameters, // GraphT &graph // // TODO: add problem specific outputs, e.g.: // //ValueT **distances // ) // { // typedef typename GraphT::VertexT VertexT; // typedef gunrock::app::Template::Problem<GraphT > ProblemT; // typedef gunrock::app::Template::Enactor<ProblemT> EnactorT; // gunrock::util::CpuTimer cpu_timer; // gunrock::util::Location target = gunrock::util::DEVICE; // double total_time = 0; // if (parameters.UseDefault("quiet")) // parameters.Set("quiet", true); // // Allocate problem and enactor on GPU, and initialize them // ProblemT problem(parameters); // EnactorT enactor; // problem.Init(graph , target); // enactor.Init(problem, target); // int num_runs = parameters.Get<int>("num-runs"); // // TODO: get problem specific inputs, e.g.: // // std::vector<VertexT> srcs = // parameters.Get<std::vector<VertexT>>("srcs"); // // int num_srcs = srcs.size(); // for (int run_num = 0; run_num < num_runs; ++run_num) // { // // TODO: problem specific inputs, e.g.: // // int src_num = run_num % num_srcs; // // VertexT src = srcs[src_num]; // problem.Reset(/*src,*/ target); // enactor.Reset(/*src,*/ target); // cpu_timer.Start(); // enactor.Enact(/*src*/); // cpu_timer.Stop(); // total_time += cpu_timer.ElapsedMillis(); // // TODO: extract problem specific data, e.g.: // problem.Extract(/*distances[src_num]*/); // } // enactor.Release(target); // problem.Release(target); // // TODO: problem specific clean ups, e.g.: // // srcs.clear(); // return total_time; // } // * @brief Simple interface take in graph as CSR format // * @param[in] num_nodes Number of veritces in the input graph // * @param[in] num_edges Number of edges in the input graph // * @param[in] row_offsets CSR-formatted graph input row offsets // * @param[in] col_indices CSR-formatted graph input column indices // * @param[in] edge_values CSR-formatted graph input edge weights // * @param[in] num_runs Number of runs to perform SSSP // * @param[in] sources Sources to begin traverse, one for each run // * @param[in] mark_preds Whether to output predecessor info // * @param[out] distances Return shortest distance to source per vertex // * @param[out] preds Return predecessors of each vertex // * \return double Return accumulated elapsed times for all runs // template < // typename VertexT = int, // typename SizeT = int, // typename GValueT = unsigned int, // typename TValueT = GValueT> // float Geolocation( // const SizeT num_nodes, // const SizeT num_edges, // const SizeT *row_offsets, // const VertexT *col_indices, // const GValueT *edge_values, // const int num_runs // // TODO: add problem specific inputs and outputs, e.g.: // // VertexT *sources, // // SSSPValueT **distances // ) // { // // TODO: change to other graph representation, if not using CSR // typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT, // gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR> // GraphT; // typedef typename GraphT::CsrT CsrT; // // Setup parameters // gunrock::util::Parameters parameters("Template"); // 
gunrock::graphio::UseParameters(parameters); // gunrock::app::Template::UseParameters(parameters); // gunrock::app::UseParameters_test(parameters); // parameters.Parse_CommandLine(0, NULL); // parameters.Set("graph-type", "by-pass"); // parameters.Set("num-runs", num_runs); // // TODO: problem specific inputs, e.g.: // // std::vector<VertexT> srcs; // // for (int i = 0; i < num_runs; i ++) // // srcs.push_back(sources[i]); // // parameters.Set("srcs", srcs); // bool quiet = parameters.Get<bool>("quiet"); // GraphT graph; // // Assign pointers into gunrock graph format // // TODO: change to other graph representation, if not using CSR // graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); // graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1, // gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices, // num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), true, quiet); // gunrock::graphio::LoadGraph(parameters, graph); // // Run the Template // // TODO: add problem specific outputs, e.g. // double elapsed_time = gunrock_Template(parameters, graph /*, // distances*/); // // Cleanup // graph.Release(); // // TODO: problem specific cleanup // // srcs.clear(); // return elapsed_time; // } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // // End:
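RunTests above times each Enact() call with util::CpuTimer (Start/Stop/ElapsedMillis) and feeds the per-run figure into info.CollectSingleRun. The sketch below reproduces only that measurement pattern, using std::chrono as a stand-in for CpuTimer; the WallTimer type, the work() placeholder and the averaging at the end are illustrative and are not part of Gunrock.

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for util::CpuTimer with the same Start/Stop/ElapsedMillis shape.
struct WallTimer {
    std::chrono::steady_clock::time_point t0, t1;
    void Start() { t0 = std::chrono::steady_clock::now(); }
    void Stop()  { t1 = std::chrono::steady_clock::now(); }
    double ElapsedMillis() const {
        return std::chrono::duration<double, std::milli>(t1 - t0).count();
    }
};

// Placeholder for the enactor.Enact() call that is actually being timed above.
static void work() {
    volatile double s = 0;
    for (int i = 0; i < 1000000; ++i) s = s + i * 1e-9;
}

int main() {
    const int num_runs = 3;  // mirrors the num-runs CLI parameter
    double total_ms = 0;
    WallTimer timer;
    for (int run = 0; run < num_runs; ++run) {
        timer.Start();
        work();
        timer.Stop();
        total_ms += timer.ElapsedMillis();  // analogue of info.CollectSingleRun(...)
        printf("Run %d elapsed: %f ms\n", run, timer.ElapsedMillis());
    }
    printf("average: %f ms over %d runs\n", total_ms / num_runs, num_runs);
    return 0;
}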
30429e3e1d7e41471fe4694ad1396d25118e084e.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file geo_app.cu * * @brief Geolocation Application */ #include <gunrock/gunrock.h> #include <gunrock/util/test_utils.cuh> #include <gunrock/graphio/graphio.cuh> #include <gunrock/graphio/labels.cuh> #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> #include <gunrock/app/geo/geo_enactor.cuh> #include <gunrock/app/geo/geo_test.cuh> namespace gunrock { namespace app { namespace geo { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<int>( "geo-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Number of iterations geolocation should run for (default=3).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "spatial-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1000, "Number of maximum iterations spatial median " "kernel should run for (default=1000).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "geo-complete", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Run geolocation application until all locations for all nodes are " "found, uses an atomic (default=false).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "labels-file", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "User locations label file for geolocation app.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "debug", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Debug label values, this prints out the entire labels array (longitude, " "latitude).", __FILE__, __LINE__)); return retval; } /** * @brief Run geolocation tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return cudaError_t error message(s), if any */ template <typename GraphT, typename ArrayT> cudaError_t RunTests(util::Parameters &parameters, GraphT &graph, ArrayT &h_latitude, ArrayT &h_longitude, ArrayT &ref_predicted_lat, ArrayT &ref_predicted_lon, util::Location target) { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); int geo_iter = parameters.Get<int>("geo-iter"); int spatial_iter = parameters.Get<int>("spatial-iter"); util::PrintMsg("Number of iterations: " + std::to_string(geo_iter), !quiet_mode); util::Info info("geolocation", parameters, graph); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // Allocate problem specific host data array to // extract device values to host ValueT *h_predicted_lat = new ValueT[graph.nodes]; ValueT *h_predicted_lon = new ValueT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; util::PrintMsg("Initializing problem ... ", !quiet_mode); GUARD_CU(problem.Init(graph, target)); util::PrintMsg("Initializing enactor ... ", !quiet_mode); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(h_latitude.GetPointer(util::HOST), h_longitude.GetPointer(util::HOST), geo_iter, spatial_iter, target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon)); SizeT num_errors = Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon, ref_predicted_lat, ref_predicted_lon, false); } } cpu_timer.Start(); // Extract problem data GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon, ref_predicted_lat, ref_predicted_lon, false); } // compute running statistics info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace geo } // namespace app } // namespace gunrock // =========================================================================================== // ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS // ======================= // =========================================================================================== // /* // * @brief Entry of gunrock_template 
function // * @tparam GraphT Type of the graph // * @tparam ValueT Type of the distances // * @param[in] parameters Excution parameters // * @param[in] graph Input graph // * @param[out] distances Return shortest distance to source per vertex // * @param[out] preds Return predecessors of each vertex // * \return double Return accumulated elapsed times for all runs // */ // template <typename GraphT, typename ValueT = typename GraphT::ValueT> // double gunrock_Template( // gunrock::util::Parameters &parameters, // GraphT &graph // // TODO: add problem specific outputs, e.g.: // //ValueT **distances // ) // { // typedef typename GraphT::VertexT VertexT; // typedef gunrock::app::Template::Problem<GraphT > ProblemT; // typedef gunrock::app::Template::Enactor<ProblemT> EnactorT; // gunrock::util::CpuTimer cpu_timer; // gunrock::util::Location target = gunrock::util::DEVICE; // double total_time = 0; // if (parameters.UseDefault("quiet")) // parameters.Set("quiet", true); // // Allocate problem and enactor on GPU, and initialize them // ProblemT problem(parameters); // EnactorT enactor; // problem.Init(graph , target); // enactor.Init(problem, target); // int num_runs = parameters.Get<int>("num-runs"); // // TODO: get problem specific inputs, e.g.: // // std::vector<VertexT> srcs = // parameters.Get<std::vector<VertexT>>("srcs"); // // int num_srcs = srcs.size(); // for (int run_num = 0; run_num < num_runs; ++run_num) // { // // TODO: problem specific inputs, e.g.: // // int src_num = run_num % num_srcs; // // VertexT src = srcs[src_num]; // problem.Reset(/*src,*/ target); // enactor.Reset(/*src,*/ target); // cpu_timer.Start(); // enactor.Enact(/*src*/); // cpu_timer.Stop(); // total_time += cpu_timer.ElapsedMillis(); // // TODO: extract problem specific data, e.g.: // problem.Extract(/*distances[src_num]*/); // } // enactor.Release(target); // problem.Release(target); // // TODO: problem specific clean ups, e.g.: // // srcs.clear(); // return total_time; // } // * @brief Simple interface take in graph as CSR format // * @param[in] num_nodes Number of veritces in the input graph // * @param[in] num_edges Number of edges in the input graph // * @param[in] row_offsets CSR-formatted graph input row offsets // * @param[in] col_indices CSR-formatted graph input column indices // * @param[in] edge_values CSR-formatted graph input edge weights // * @param[in] num_runs Number of runs to perform SSSP // * @param[in] sources Sources to begin traverse, one for each run // * @param[in] mark_preds Whether to output predecessor info // * @param[out] distances Return shortest distance to source per vertex // * @param[out] preds Return predecessors of each vertex // * \return double Return accumulated elapsed times for all runs // template < // typename VertexT = int, // typename SizeT = int, // typename GValueT = unsigned int, // typename TValueT = GValueT> // float Geolocation( // const SizeT num_nodes, // const SizeT num_edges, // const SizeT *row_offsets, // const VertexT *col_indices, // const GValueT *edge_values, // const int num_runs // // TODO: add problem specific inputs and outputs, e.g.: // // VertexT *sources, // // SSSPValueT **distances // ) // { // // TODO: change to other graph representation, if not using CSR // typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT, // gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR> // GraphT; // typedef typename GraphT::CsrT CsrT; // // Setup parameters // gunrock::util::Parameters parameters("Template"); // 
gunrock::graphio::UseParameters(parameters); // gunrock::app::Template::UseParameters(parameters); // gunrock::app::UseParameters_test(parameters); // parameters.Parse_CommandLine(0, NULL); // parameters.Set("graph-type", "by-pass"); // parameters.Set("num-runs", num_runs); // // TODO: problem specific inputs, e.g.: // // std::vector<VertexT> srcs; // // for (int i = 0; i < num_runs; i ++) // // srcs.push_back(sources[i]); // // parameters.Set("srcs", srcs); // bool quiet = parameters.Get<bool>("quiet"); // GraphT graph; // // Assign pointers into gunrock graph format // // TODO: change to other graph representation, if not using CSR // graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); // graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1, // gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices, // num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), true, quiet); // gunrock::graphio::LoadGraph(parameters, graph); // // Run the Template // // TODO: add problem specific outputs, e.g. // double elapsed_time = gunrock_Template(parameters, graph /*, // distances*/); // // Cleanup // graph.Release(); // // TODO: problem specific cleanup // // srcs.clear(); // return elapsed_time; // } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // // End:
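Every GUARD_CU(...) call in the file above checks the cudaError_t returned by the wrapped expression and, on failure, propagates it out of the enclosing function. The program below is a minimal, self-contained analogue of that check-and-propagate behaviour; the CHECK_PROPAGATE macro and allocate_and_clear are hypothetical names and are NOT Gunrock's actual GUARD_CU definition.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical analogue of the GUARD_CU pattern: report the error and return it to the caller.
#define CHECK_PROPAGATE(call)                                                       \
  do {                                                                              \
    cudaError_t _e = (call);                                                        \
    if (_e != cudaSuccess) {                                                        \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(_e));   \
      return _e;                                                                    \
    }                                                                               \
  } while (0)

// Example routine whose error code is built up from several checked CUDA calls.
cudaError_t allocate_and_clear(float **d_buf, size_t n) {
  CHECK_PROPAGATE(cudaMalloc(d_buf, n * sizeof(float)));
  CHECK_PROPAGATE(cudaMemset(*d_buf, 0, n * sizeof(float)));
  return cudaSuccess;
}

int main() {
  float *d_buf = nullptr;
  cudaError_t retval = allocate_and_clear(&d_buf, 1 << 20);
  printf("allocate_and_clear: %s\n", cudaGetErrorString(retval));
  cudaFree(d_buf);
  return retval == cudaSuccess ? 0 : 1;
}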
699de06666da61ee1fdae42cb026a5bd7db0d636.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> #include <cstdlib> #include <cstdio> const int MIN_SIZE = 1; const int MAX_SIZE = 1048576; const int ITERS = 30; __global__ void kernel(void * gmem, const dcgn::GPUInitRequest libParam) { dcgn::CommStatus stat; dcgn::gpu::init(libParam); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::gpu::barrier(0); for (int j = 0; j < ITERS; ++j) { dcgn::gpu::recv(0, 0, gmem, i, &stat); } dcgn::gpu::barrier(0); } } __host__ void gpuKernel(void * info, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, hipStream_t * const stream) { void ** mem = (void ** )info; hipMalloc(mem, MAX_SIZE); hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), sharedMemSize, *stream, *mem, libParam); } __host__ void gpuDtor(void * info) { hipFree(*(void ** )info); } void cpuKernel(void * info) { void * mem = (void * )malloc(MAX_SIZE); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::barrier(); double t = dcgn::wallTime(); for (int j = 0; j < ITERS; ++j) { dcgn::send(1, mem, i); } t = dcgn::wallTime() - t; dcgn::barrier(); printf("%10d - %20.10f ms\n", i, t / ITERS * 1000.0f); } free(mem); } #include <mpi.h> int main(int argc, char ** argv) { void * gpuMem; int gpus[] = { 0, -1 }; uint3 gs = { 1, 1, 1 }, bs = { 1, 1, 1 }; dcgn::init(&argc, &argv); dcgn::initComm(-1); MPI_Barrier(MPI_COMM_WORLD); if (dcgn::getNodeID() == 0) { dcgn::initCPU(1); dcgn::initGPU(gpus + 1, 0, 0); } else { dcgn::initCPU(0); dcgn::initGPU(gpus, 1, 0); } dcgn::start(); if (dcgn::getNodeID() == 0) dcgn::launchCPUKernel(0, cpuKernel, 0); else dcgn::launchGPUKernel(0, gpuKernel, gpuDtor, &gpuMem, gs, bs); dcgn::finalize(); return 0; }
699de06666da61ee1fdae42cb026a5bd7db0d636.cu
#include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> #include <cstdlib> #include <cstdio> const int MIN_SIZE = 1; const int MAX_SIZE = 1048576; const int ITERS = 30; __global__ void kernel(void * gmem, const dcgn::GPUInitRequest libParam) { dcgn::CommStatus stat; dcgn::gpu::init(libParam); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::gpu::barrier(0); for (int j = 0; j < ITERS; ++j) { dcgn::gpu::recv(0, 0, gmem, i, &stat); } dcgn::gpu::barrier(0); } } __host__ void gpuKernel(void * info, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, cudaStream_t * const stream) { void ** mem = (void ** )info; cudaMalloc(mem, MAX_SIZE); kernel<<<gridSize, blockSize, sharedMemSize, *stream>>>(*mem, libParam); } __host__ void gpuDtor(void * info) { cudaFree(*(void ** )info); } void cpuKernel(void * info) { void * mem = (void * )malloc(MAX_SIZE); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::barrier(); double t = dcgn::wallTime(); for (int j = 0; j < ITERS; ++j) { dcgn::send(1, mem, i); } t = dcgn::wallTime() - t; dcgn::barrier(); printf("%10d - %20.10f ms\n", i, t / ITERS * 1000.0f); } free(mem); } #include <mpi.h> int main(int argc, char ** argv) { void * gpuMem; int gpus[] = { 0, -1 }; uint3 gs = { 1, 1, 1 }, bs = { 1, 1, 1 }; dcgn::init(&argc, &argv); dcgn::initComm(-1); MPI_Barrier(MPI_COMM_WORLD); if (dcgn::getNodeID() == 0) { dcgn::initCPU(1); dcgn::initGPU(gpus + 1, 0, 0); } else { dcgn::initCPU(0); dcgn::initGPU(gpus, 1, 0); } dcgn::start(); if (dcgn::getNodeID() == 0) dcgn::launchCPUKernel(0, cpuKernel, 0); else dcgn::launchGPUKernel(0, gpuKernel, gpuDtor, &gpuMem, gs, bs); dcgn::finalize(); return 0; }
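The benchmark above prints, for each message size, the averaged per-iteration time (t / ITERS * 1000 ms). Turning that into a throughput figure is simple arithmetic; the helper below only illustrates that conversion and is not part of DCGN, and the sample numbers are made up.

#include <cstdio>

// Convert one line of the benchmark output (message size in bytes, averaged seconds per
// iteration) into an approximate throughput in MiB/s.
static double throughput_mib_per_s(int message_bytes, double seconds_per_iteration) {
    return (message_bytes / (1024.0 * 1024.0)) / seconds_per_iteration;
}

int main() {
    // Hypothetical sample: a 1 MiB message that averaged 2.5 ms per send -> ~400 MiB/s.
    printf("%.2f MiB/s\n", throughput_mib_per_s(1048576, 2.5e-3));
    return 0;
}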
8a64c3051f97442079105e31c1509ac0f5a66ad5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define CSC(call) do { \
    hipError_t e = call; \
    if (e != hipSuccess) { \
        fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(e)); \
        exit(0); \
    } \
} while(0)

__global__ void subKernel(double* a, double* b, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x; // thread index
    int offset = gridDim.x * blockDim.x;             // number of blocks * block size
    while(idx < n) {
        b[idx] = a[idx] * a[idx];
        idx += offset;
    }
}

void sub(double* a, double* b, int n, int numBytes)
{
    double* aDev = NULL;
    double* bDev = NULL;

    // Allocate memory on the GPU
    CSC(hipMalloc ( (void**)&aDev, numBytes ));
    CSC(hipMalloc ( (void**)&bDev, numBytes ));

    // Set the kernel launch configuration
    dim3 threads = 128;
    dim3 blocks = 128;

    CSC(hipMemcpy ( aDev, a, numBytes, hipMemcpyHostToDevice ));

    hipLaunchKernelGGL(( subKernel), dim3(blocks), dim3(threads), 0, 0, aDev, bDev, n);

    // Copy the result back to CPU memory
    CSC(hipMemcpy ( b, bDev, numBytes, hipMemcpyDeviceToHost ));

    // Free the allocated memory
    CSC(hipFree ( aDev ));
    CSC(hipFree ( bDev ));
}

int main() {
    int n;
    scanf("%d", &n);

    int numBytes = n * sizeof(double);

    double* a = (double*) malloc(numBytes);
    double* b = (double*) malloc(numBytes);

    for (int i = 0; i < n; ++i)
        scanf("%lf", a + i);

    sub(a, b, n, numBytes);

    for (int i = 0; i < n; ++i)
        printf("%.10e ", b[i]);
    printf("\n");

    free(a);
    free(b);
    return 0;
}
8a64c3051f97442079105e31c1509ac0f5a66ad5.cu
#include <stdio.h>

#define CSC(call) do { \
    cudaError_t e = call; \
    if (e != cudaSuccess) { \
        fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
        exit(0); \
    } \
} while(0)

__global__ void subKernel(double* a, double* b, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x; // thread index
    int offset = gridDim.x * blockDim.x;             // number of blocks * block size
    while(idx < n) {
        b[idx] = a[idx] * a[idx];
        idx += offset;
    }
}

void sub(double* a, double* b, int n, int numBytes)
{
    double* aDev = NULL;
    double* bDev = NULL;

    // Allocate memory on the GPU
    CSC(cudaMalloc ( (void**)&aDev, numBytes ));
    CSC(cudaMalloc ( (void**)&bDev, numBytes ));

    // Set the kernel launch configuration
    dim3 threads = 128;
    dim3 blocks = 128;

    CSC(cudaMemcpy ( aDev, a, numBytes, cudaMemcpyHostToDevice ));

    subKernel<<<blocks, threads>>> (aDev, bDev, n);

    // Copy the result back to CPU memory
    CSC(cudaMemcpy ( b, bDev, numBytes, cudaMemcpyDeviceToHost ));

    // Free the allocated memory
    CSC(cudaFree ( aDev ));
    CSC(cudaFree ( bDev ));
}

int main() {
    int n;
    scanf("%d", &n);

    int numBytes = n * sizeof(double);

    double* a = (double*) malloc(numBytes);
    double* b = (double*) malloc(numBytes);

    for (int i = 0; i < n; ++i)
        scanf("%lf", a + i);

    sub(a, b, n, numBytes);

    for (int i = 0; i < n; ++i)
        printf("%.10e ", b[i]);
    printf("\n");

    free(a);
    free(b);
    return 0;
}
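subKernel above uses a grid-stride loop: each thread starts at its global index and advances by gridDim.x * blockDim.x, so a fixed 128x128 launch covers any n. The host-only program below only illustrates which indices each thread would visit; the tiny block/grid sizes are illustrative and unrelated to the launch configuration above.

#include <cstdio>

// Host-side walk-through of the grid-stride pattern: with blocks*threads total workers,
// worker (b,t) handles indices t + b*threads, then keeps jumping by the stride until idx >= n.
int main() {
    const int blocks = 2, threads = 2, n = 10;
    const int offset = blocks * threads;  // plays the role of gridDim.x * blockDim.x
    for (int b = 0; b < blocks; ++b) {
        for (int t = 0; t < threads; ++t) {
            printf("block %d thread %d handles:", b, t);
            for (int idx = t + b * threads; idx < n; idx += offset)
                printf(" %d", idx);
            printf("\n");
        }
    }
    return 0;
}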
2fc648b2eb033f4287e09f83cf997f0e41427622.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void initializeVolume (PtrStep<T> volume) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < VOLUME_X && y < VOLUME_Y) { T *pos = volume.ptr(y) + x; int z_step = VOLUME_Y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < VOLUME_Z; ++z, pos+=z_step) pack_tsdf (0.f, 0, *pos); } } template<typename T> __global__ void clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x <= minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y <= minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Pointer to the first x,y,0 T *pos = volume.ptr(y) + x; ///Get the step on Z int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos); ///Get the size of the whole TSDF memory int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1; ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step) { ///If we went outside of the memory, make sure we go back to the beginning of it if(pos > buffer.tsdf_memory_end) pos -= size; if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894 pack_tsdf (0.f, 0, *pos); } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only 
appropriate Z ///Pointer to the first x,y,0 T *pos = volume.ptr(y) + x; ///Get the step on Z int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos); ///Get the size of the whole TSDF memory int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1; ///Move pointer to the Z origin pos+= minBounds.z * z_step; ///If the Z offset is negative, we move the pointer back if(maxBounds.z < 0) pos += maxBounds.z * z_step; ///We make sure that we are not already before the start of the memory if(pos < buffer.tsdf_memory_start) pos += size; int nbSteps = std::abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z, pos+=z_step) { ///If we went outside of the memory, make sure we go back to the beginning of it if(pos > buffer.tsdf_memory_end) pos -= size; if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894 pack_tsdf (0.f, 0, *pos); } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // clearSliceKernel void initVolume (PtrStep<short2> volume) { dim3 block (16, 16); dim3 grid (1, 1, 1); grid.x = divUp (VOLUME_X, block.x); grid.y = divUp (VOLUME_Y, block.y); hipLaunchKernelGGL(( initializeVolume), dim3(grid), dim3(block), 0, 0, volume); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { struct Tsdf { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, MAX_WEIGHT = 1 << 7 }; mutable PtrStep<short2> volume; float3 cell_size; Intr intr; Mat33 Rcurr_inv; float3 tcurr; PtrStepSz<ushort> depth_raw; //depth in mm float tranc_dist_mm; __device__ __forceinline__ float3 getVoxelGCoo (int x, int y, int z) const { float3 coo = make_float3 (x, y, z); coo += 0.5f; //shift to cell center; coo.x *= cell_size.x; coo.y *= cell_size.y; coo.z *= cell_size.z; return coo; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(*pos); for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step) { float3 v_g = getVoxelGCoo (x, y, z); //3 // p //transform to curr cam coo space float3 v = Rcurr_inv * (v_g - tcurr); //4 int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm sdf *= (-1); if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } } }; __global__ void integrateTsdfKernel (const Tsdf tsdf) { tsdf (); } __global__ void tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr, const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + 
blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z; float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z; float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z; //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z) { float3 vr; vr.x = v_g_x; vr.y = v_g_y; vr.z = (v_g_z + z * cell_size.z); float3 v; v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z; v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z; v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z; int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } pos += elem_step; } /* for(int z = 0; z < VOLUME_Z; ++z) */ } /* __global__ */ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume) { Tsdf tsdf; tsdf.volume = volume; tsdf.cell_size.x = volume_size.x / VOLUME_X; tsdf.cell_size.y = volume_size.y / VOLUME_Y; tsdf.cell_size.z = volume_size.z / VOLUME_Z; tsdf.intr = intr; tsdf.Rcurr_inv = Rcurr_inv; tsdf.tcurr = tcurr; tsdf.depth_raw = depth_raw; tsdf.tranc_dist_mm = tranc_dist*1000; //mm dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); #if 0 //tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size); hipLaunchKernelGGL(( integrateTsdfKernel), dim3(grid), dim3(block), 0, 0, tsdf); #endif cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; int Dp = depth.ptr (y)[x]; float xl = (x - intr.cx) / intr.fx; float yl = (y - intr.cy) / intr.fy; float lambda = sqrtf (xl * xl + yl * yl + 1); scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters } __global__ void tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const 
pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= buffer.voxels_size.x || y >= buffer.voxels_size.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; // shift the pointer to relative indices shift_tsdf_pointer(&pos, buffer); int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2); //#pragma unroll for (int z = 0; z < buffer.voxels_size.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory if(pos > buffer.tsdf_memory_end) pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1); float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; int elem_step = 
volume.step * VOLUME_Y / sizeof(short2); //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); bool integrate = true; if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2)) { const float qnan = numeric_limits<float>::quiet_NaN(); float3 normal = make_float3(qnan, qnan, qnan); float Fn, Fp; int Wn = 0, Wp = 0; unpack_tsdf (*(pos + elem_step), Fn, Wn); unpack_tsdf (*(pos - elem_step), Fp, Wp); if (Wn > 16 && Wp > 16) normal.z = (Fn - Fp)/cell_size.z; unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn); unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp); if (Wn > 16 && Wp > 16) normal.y = (Fn - Fp)/cell_size.y; unpack_tsdf (*(pos + 1), Fn, Wn); unpack_tsdf (*(pos - 1), Fp, Wp); if (Wn > 16 && Wp > 16) normal.x = (Fn - Fp)/cell_size.x; if (normal.x != qnan && normal.y != qnan && normal.z != qnan) { float norm2 = dot(normal, normal); if (norm2 >= 1e-10) { normal *= rsqrt(norm2); float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z; float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z); if (cosine < 0.5) integrate = false; } } } if (integrate) { //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. 
hipLaunchKernelGGL(( scaleDepth), dim3(grid_scale), dim3(block_scale), 0, 0, depth, depthScaled, intr); cudaSafeCall ( hipGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); hipLaunchKernelGGL(( tsdf23), dim3(grid), dim3(block), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ) { int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int3 minBounds, maxBounds; //X if(newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); //Z minBounds.z = buffer->origin_GRID.z; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp (buffer->voxels_size.y, block.y); hipLaunchKernelGGL(( clearSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } }
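The tsdf23 and integrateTsdfKernel updates above fold each new depth observation into a voxel with a weighted running average, tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk), with Wrk = 1 and the weight saturating at Tsdf::MAX_WEIGHT (1 << 7). The program below is a stand-alone sketch of just that arithmetic; the Voxel struct and the sample values are illustrative and are not part of the PCL code.

#include <algorithm>
#include <cstdio>

// Hypothetical unpacked voxel; in the code above the pair is packed into a short2.
struct Voxel { float tsdf; int weight; };

// Same blend as the kernels above: new sample has weight Wrk = 1, weight is capped.
static void integrate_sample(Voxel &v, float tsdf_sample, int max_weight) {
    const int Wrk = 1;
    v.tsdf   = (v.tsdf * v.weight + Wrk * tsdf_sample) / (v.weight + Wrk);
    v.weight = std::min(v.weight + Wrk, max_weight);
}

int main() {
    Voxel v{0.0f, 0};                  // cleared voxel, as written by initializeVolume: pack_tsdf(0.f, 0, ...)
    const int MAX_WEIGHT = 1 << 7;     // matches Tsdf::MAX_WEIGHT above
    float samples[] = {0.4f, 0.2f, -0.1f};
    for (float s : samples) {
        integrate_sample(v, s, MAX_WEIGHT);
        printf("tsdf=%.4f weight=%d\n", v.tsdf, v.weight);  // 0.4000/1, 0.3000/2, 0.1667/3
    }
    return 0;
}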
2fc648b2eb033f4287e09f83cf997f0e41427622.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void initializeVolume (PtrStep<T> volume) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < VOLUME_X && y < VOLUME_Y) { T *pos = volume.ptr(y) + x; int z_step = VOLUME_Y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < VOLUME_Z; ++z, pos+=z_step) pack_tsdf (0.f, 0, *pos); } } template<typename T> __global__ void clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x <= minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y <= minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX <= maxBounds.x) || (idY >= minBounds.y && idY <= maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Pointer to the first x,y,0 T *pos = volume.ptr(y) + x; ///Get the step on Z int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos); ///Get the size of the whole TSDF memory int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1; ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step) { ///If we went outside of the memory, make sure we go back to the beginning of it if(pos > buffer.tsdf_memory_end) pos -= size; if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894 pack_tsdf (0.f, 0, *pos); } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only appropriate Z ///Pointer to the first x,y,0 T *pos = volume.ptr(y) + x; ///Get the step 
on Z int z_step = buffer.voxels_size.y * volume.step / sizeof(*pos); ///Get the size of the whole TSDF memory int size = buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1; ///Move pointer to the Z origin pos+= minBounds.z * z_step; ///If the Z offset is negative, we move the pointer back if(maxBounds.z < 0) pos += maxBounds.z * z_step; ///We make sure that we are not already before the start of the memory if(pos < buffer.tsdf_memory_start) pos += size; int nbSteps = std::abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z, pos+=z_step) { ///If we went outside of the memory, make sure we go back to the beginning of it if(pos > buffer.tsdf_memory_end) pos -= size; if (pos >= buffer.tsdf_memory_start && pos <= buffer.tsdf_memory_end) // quickfix for http://dev.pointclouds.org/issues/894 pack_tsdf (0.f, 0, *pos); } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // clearSliceKernel void initVolume (PtrStep<short2> volume) { dim3 block (16, 16); dim3 grid (1, 1, 1); grid.x = divUp (VOLUME_X, block.x); grid.y = divUp (VOLUME_Y, block.y); initializeVolume<<<grid, block>>>(volume); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { struct Tsdf { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, MAX_WEIGHT = 1 << 7 }; mutable PtrStep<short2> volume; float3 cell_size; Intr intr; Mat33 Rcurr_inv; float3 tcurr; PtrStepSz<ushort> depth_raw; //depth in mm float tranc_dist_mm; __device__ __forceinline__ float3 getVoxelGCoo (int x, int y, int z) const { float3 coo = make_float3 (x, y, z); coo += 0.5f; //shift to cell center; coo.x *= cell_size.x; coo.y *= cell_size.y; coo.z *= cell_size.z; return coo; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(*pos); for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step) { float3 v_g = getVoxelGCoo (x, y, z); //3 // p //transform to curr cam coo space float3 v = Rcurr_inv * (v_g - tcurr); //4 int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm sdf *= (-1); if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } } }; __global__ void integrateTsdfKernel (const Tsdf tsdf) { tsdf (); } __global__ void tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr, const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = 
volume.step * VOLUME_Y / sizeof(short2); float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z; float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z; float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z; //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z) { float3 vr; vr.x = v_g_x; vr.y = v_g_y; vr.z = (v_g_z + z * cell_size.z); float3 v; v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z; v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z; v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z; int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } pos += elem_step; } /* for(int z = 0; z < VOLUME_Z; ++z) */ } /* __global__ */ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume) { Tsdf tsdf; tsdf.volume = volume; tsdf.cell_size.x = volume_size.x / VOLUME_X; tsdf.cell_size.y = volume_size.y / VOLUME_Y; tsdf.cell_size.z = volume_size.z / VOLUME_Z; tsdf.intr = intr; tsdf.Rcurr_inv = Rcurr_inv; tsdf.tcurr = tcurr; tsdf.depth_raw = depth_raw; tsdf.tranc_dist_mm = tranc_dist*1000; //mm dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); #if 0 //tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size); integrateTsdfKernel<<<grid, block>>>(tsdf); #endif cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; int Dp = depth.ptr (y)[x]; float xl = (x - intr.cx) / intr.fx; float yl = (y - intr.cy) / intr.fy; float lambda = sqrtf (xl * xl + yl * yl + 1); scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters } __global__ void tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= 
buffer.voxels_size.x || y >= buffer.voxels_size.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; // shift the pointer to relative indices shift_tsdf_pointer(&pos, buffer); int elem_step = volume.step * buffer.voxels_size.y / sizeof(short2); //#pragma unroll for (int z = 0; z < buffer.voxels_size.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory if(pos > buffer.tsdf_memory_end) pos -= (buffer.tsdf_memory_end - buffer.tsdf_memory_start + 1); float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += 
Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); bool integrate = true; if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2)) { const float qnan = numeric_limits<float>::quiet_NaN(); float3 normal = make_float3(qnan, qnan, qnan); float Fn, Fp; int Wn = 0, Wp = 0; unpack_tsdf (*(pos + elem_step), Fn, Wn); unpack_tsdf (*(pos - elem_step), Fp, Wp); if (Wn > 16 && Wp > 16) normal.z = (Fn - Fp)/cell_size.z; unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn); unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp); if (Wn > 16 && Wp > 16) normal.y = (Fn - Fp)/cell_size.y; unpack_tsdf (*(pos + 1), Fn, Wn); unpack_tsdf (*(pos - 1), Fp, Wp); if (Wn > 16 && Wp > 16) normal.x = (Fn - Fp)/cell_size.x; if (normal.x != qnan && normal.y != qnan && normal.z != qnan) { float norm2 = dot(normal, normal); if (norm2 >= 1e-10) { normal *= rsqrt(norm2); float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z; float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z); if (cosine < 0.5) integrate = false; } } } if (integrate) { //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. 
scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr); cudaSafeCall ( cudaGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ) { int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int3 minBounds, maxBounds; //X if(newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); //Z minBounds.z = buffer->origin_GRID.z; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp (buffer->voxels_size.y, block.y); clearSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } }
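The integration kernels in the file above (the Tsdf functor, tsdf2, tsdf23) all apply the same per-voxel update: a running weighted average of the truncated signed distance with a saturating weight. A minimal host-side sketch of that update, assuming plain float/int storage in place of the short2 packing done by pack_tsdf/unpack_tsdf; the struct and function names here are illustrative, not part of the original file:

struct TsdfVoxelSketch { float tsdf; int weight; };

// Fold one observation into a voxel. Wrk is 1 in the kernels above,
// max_weight corresponds to Tsdf::MAX_WEIGHT (1 << 7).
inline void update_voxel_sketch(TsdfVoxelSketch &v, float tsdf_obs, int Wrk, int max_weight)
{
    // blend the stored value with the new observation, weighted by how often the voxel was seen
    v.tsdf   = (v.tsdf * v.weight + Wrk * tsdf_obs) / (v.weight + Wrk);
    // the weight saturates at max_weight, so new observations always keep a minimum influence
    v.weight = (v.weight + Wrk < max_weight) ? (v.weight + Wrk) : max_weight;
}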
284699cc3d3536ffc2d2716494d8ef4c677538a0.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <utils.hpp>

#include <cuco/static_map.cuh>

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

#include <catch2/catch_template_test_macros.hpp>

#define SIZE 10
__device__ int A[SIZE];

template <typename T>
struct custom_equals {
  __device__ bool operator()(T lhs, T rhs) { return A[lhs] == A[rhs]; }
};

TEMPLATE_TEST_CASE_SIG(
  "Key comparison against sentinel", "", ((typename T), T), (int32_t), (int64_t))
{
  using Key   = T;
  using Value = T;

  constexpr std::size_t num_keys{SIZE};
  cuco::static_map<Key, Value> map{
    SIZE * 2, cuco::empty_key<Key>{-1}, cuco::empty_value<Value>{-1}};
  auto m_view = map.get_device_mutable_view();
  auto view   = map.get_device_view();

  int h_A[SIZE];
  for (int i = 0; i < SIZE; i++) {
    h_A[i] = i;
  }
  CUCO_CUDA_TRY(hipMemcpyToSymbol(A, h_A, SIZE * sizeof(int)));

  auto pairs_begin = thrust::make_transform_iterator(
    thrust::make_counting_iterator<T>(0),
    [] __device__(auto i) { return cuco::pair<Key, Value>(i, i); });

  SECTION(
    "Tests of non-CG insert: The custom `key_equal` can never be used to compare against sentinel")
  {
    REQUIRE(cuco::test::all_of(
      pairs_begin,
      pairs_begin + num_keys,
      [m_view] __device__(cuco::pair<Key, Value> const& pair) mutable {
        return m_view.insert(pair, cuco::default_hash_function<Key>{}, custom_equals<Key>{});
      }));
  }

  SECTION(
    "Tests of CG insert: The custom `key_equal` can never be used to compare against sentinel")
  {
    map.insert(pairs_begin,
               pairs_begin + num_keys,
               cuco::default_hash_function<Key>{},
               custom_equals<Key>{});

    // All keys inserted via custom `key_equal` should be found
    REQUIRE(cuco::test::all_of(
      pairs_begin,
      pairs_begin + num_keys,
      [view] __device__(cuco::pair<Key, Value> const& pair) {
        auto const found = view.find(pair.first);
        return (found != view.end()) and
               (found->first.load() == pair.first and found->second.load() == pair.second);
      }));
  }
}
284699cc3d3536ffc2d2716494d8ef4c677538a0.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <utils.hpp>

#include <cuco/static_map.cuh>

#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

#include <catch2/catch_template_test_macros.hpp>

#define SIZE 10
__device__ int A[SIZE];

template <typename T>
struct custom_equals {
  __device__ bool operator()(T lhs, T rhs) { return A[lhs] == A[rhs]; }
};

TEMPLATE_TEST_CASE_SIG(
  "Key comparison against sentinel", "", ((typename T), T), (int32_t), (int64_t))
{
  using Key   = T;
  using Value = T;

  constexpr std::size_t num_keys{SIZE};
  cuco::static_map<Key, Value> map{
    SIZE * 2, cuco::empty_key<Key>{-1}, cuco::empty_value<Value>{-1}};
  auto m_view = map.get_device_mutable_view();
  auto view   = map.get_device_view();

  int h_A[SIZE];
  for (int i = 0; i < SIZE; i++) {
    h_A[i] = i;
  }
  CUCO_CUDA_TRY(cudaMemcpyToSymbol(A, h_A, SIZE * sizeof(int)));

  auto pairs_begin = thrust::make_transform_iterator(
    thrust::make_counting_iterator<T>(0),
    [] __device__(auto i) { return cuco::pair<Key, Value>(i, i); });

  SECTION(
    "Tests of non-CG insert: The custom `key_equal` can never be used to compare against sentinel")
  {
    REQUIRE(cuco::test::all_of(
      pairs_begin,
      pairs_begin + num_keys,
      [m_view] __device__(cuco::pair<Key, Value> const& pair) mutable {
        return m_view.insert(pair, cuco::default_hash_function<Key>{}, custom_equals<Key>{});
      }));
  }

  SECTION(
    "Tests of CG insert: The custom `key_equal` can never be used to compare against sentinel")
  {
    map.insert(pairs_begin,
               pairs_begin + num_keys,
               cuco::default_hash_function<Key>{},
               custom_equals<Key>{});

    // All keys inserted via custom `key_equal` should be found
    REQUIRE(cuco::test::all_of(
      pairs_begin,
      pairs_begin + num_keys,
      [view] __device__(cuco::pair<Key, Value> const& pair) {
        auto const found = view.find(pair.first);
        return (found != view.end()) and
               (found->first.load() == pair.first and found->second.load() == pair.second);
      }));
  }
}
cda6dc751e918083cea843cb3df891f0d7112266.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>

#include <limits>

namespace at::native {

#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan_impl";
#endif

void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
    static const auto tan_string = jiterator_stringify(
        template <typename T> T tan_impl(T a) { return std::tan(a); });
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          jitted_gpu_kernel<
              /*name=*/tan_name,
              /*return_dtype=*/scalar_t,
              /*common_dtype=*/scalar_t,
              /*arity=*/1>(iter, tan_string);
        });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            using opmath_t = at::opmath_type<scalar_t>;
            return ::tan(static_cast<opmath_t>(a));
          });
        });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "tan_cuda",
        [&]() {
          gpu_kernel(
              iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
        });
  }
}

REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);

} // namespace at::native
cda6dc751e918083cea843cb3df891f0d7112266.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>

#include <limits>

namespace at::native {

#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan_impl";
#endif

void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
    static const auto tan_string = jiterator_stringify(
        template <typename T> T tan_impl(T a) { return std::tan(a); });
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          jitted_gpu_kernel<
              /*name=*/tan_name,
              /*return_dtype=*/scalar_t,
              /*common_dtype=*/scalar_t,
              /*arity=*/1>(iter, tan_string);
        });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            using opmath_t = at::opmath_type<scalar_t>;
            return ::tan(static_cast<opmath_t>(a));
          });
        });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "tan_cuda",
        [&]() {
          gpu_kernel(
              iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
        });
  }
}

REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);

} // namespace at::native
ff7b0810fb7380f11499b2208ce3cab3ca09df64.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////
//
//   Copyright 2014 PMC-Sierra, Inc.
//
//   Licensed under the Apache License, Version 2.0 (the "License"); you
//   may not use this file except in compliance with the License. You may
//   obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0 Unless required by
//   applicable law or agreed to in writing, software distributed under the
//   License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//   CONDITIONS OF ANY KIND, either express or implied. See the License for
//   the specific language governing permissions and limitations under the
//   License.
//
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
//
//   Author: Logan Gunthorpe
//
//   Date: Oct 23 2014
//
//   Description:
//     Image Search CUDA Routines
//
////////////////////////////////////////////////////////////////////////

#include "img_search_cuda.h"

__global__ void cmplx_mult_and_scale(complex_cuda_px *x, complex_cuda_px *y,
                                     image_px divconst)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    image_px a,b,c,d;

    a = x[i].x;
    b = x[i].y;
    c = y[i].x;
    d = y[i].y;

    x[i].x = (a*c - b*d) / divconst;
    x[i].y = (b*c + a*d) / divconst;
}

hipError_t img_search_cuda_multiply(complex_cuda_px *x, complex_cuda_px *y,
                                    size_t bufsize, image_px divconst,
                                    hipStream_t stream)
{
    dim3 block_size, grid_size;

    block_size.x = 1024;
    while (bufsize & (block_size.x - 1))
        block_size.x >>= 1;

    grid_size.x = bufsize / block_size.x;

    hipLaunchKernelGGL(( cmplx_mult_and_scale), dim3(grid_size), dim3(block_size), 0, stream,
                       x, y, divconst);

    return hipPeekAtLastError();
}
ff7b0810fb7380f11499b2208ce3cab3ca09df64.cu
////////////////////////////////////////////////////////////////////////
//
//   Copyright 2014 PMC-Sierra, Inc.
//
//   Licensed under the Apache License, Version 2.0 (the "License"); you
//   may not use this file except in compliance with the License. You may
//   obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0 Unless required by
//   applicable law or agreed to in writing, software distributed under the
//   License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//   CONDITIONS OF ANY KIND, either express or implied. See the License for
//   the specific language governing permissions and limitations under the
//   License.
//
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
//
//   Author: Logan Gunthorpe
//
//   Date: Oct 23 2014
//
//   Description:
//     Image Search CUDA Routines
//
////////////////////////////////////////////////////////////////////////

#include "img_search_cuda.h"

__global__ void cmplx_mult_and_scale(complex_cuda_px *x, complex_cuda_px *y,
                                     image_px divconst)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    image_px a,b,c,d;

    a = x[i].x;
    b = x[i].y;
    c = y[i].x;
    d = y[i].y;

    x[i].x = (a*c - b*d) / divconst;
    x[i].y = (b*c + a*d) / divconst;
}

cudaError_t img_search_cuda_multiply(complex_cuda_px *x, complex_cuda_px *y,
                                     size_t bufsize, image_px divconst,
                                     cudaStream_t stream)
{
    dim3 block_size, grid_size;

    block_size.x = 1024;
    while (bufsize & (block_size.x - 1))
        block_size.x >>= 1;

    grid_size.x = bufsize / block_size.x;

    cmplx_mult_and_scale<<<grid_size, block_size, 0, stream>>> (x, y, divconst);

    return cudaPeekAtLastError();
}
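cmplx_mult_and_scale in the pair above is an element-wise complex product followed by a scalar divide, i.e. x[i] = x[i]*y[i]/divconst using (a+bi)(c+di) = (ac - bd) + (bc + ad)i, and img_search_cuda_multiply halves block_size.x until it divides bufsize so the grid covers the buffer with no remainder handling. A host-side reference for a single element, with an illustrative struct standing in for complex_cuda_px (not part of the original files):

struct cpx_sketch { float x, y; };   // x = real part, y = imaginary part, matching the kernel's field use

inline cpx_sketch cmplx_mult_and_scale_ref(cpx_sketch a, cpx_sketch b, float divconst)
{
    cpx_sketch r;
    r.x = (a.x * b.x - a.y * b.y) / divconst;   // real part:      ac - bd
    r.y = (a.y * b.x + a.x * b.y) / divconst;   // imaginary part: bc + ad
    return r;
}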
b2ceede1eb617a44a9e7bbe36d0032cc8f3300fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 1000
#define THREAD_X 4

__global__ void index(float *A, float *B){
    int i = blockDim.x*blockIdx.x+threadIdx.x;
    //int i = threadIdx.x;
    float X = 1.23;
    float Y = 2.34 ;
    B[i] = A[i]*X + Y;
}

int main(){
    float A[N], *A_d;
    float B[N], *B_d;
    int i;

    dim3 dimBlock(THREAD_X);
    dim3 dimGrid(N/THREAD_X);

    for(i = 0 ; i < N; i++){
        A[i] = i*2;
    }

    hipMalloc((void**)&A_d, sizeof(int)*N);
    hipMalloc((void**)&B_d, sizeof(int)*N);

    hipMemcpy(A_d, A, sizeof(int)*N, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, sizeof(int)*N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( index), dim3(dimGrid), dim3(dimBlock), 0, 0, A_d, B_d);

    hipMemcpy(A, A_d, sizeof(int)*N, hipMemcpyDeviceToHost);
    hipMemcpy(B, B_d, sizeof(int)*N, hipMemcpyDeviceToHost);

    for(i = 0; i < N; i++){
        printf("%f ",B[i]);
    }

    hipFree(A_d);
}
b2ceede1eb617a44a9e7bbe36d0032cc8f3300fa.cu
#include <stdio.h>

#define N 1000
#define THREAD_X 4

__global__ void index(float *A, float *B){
    int i = blockDim.x*blockIdx.x+threadIdx.x;
    //int i = threadIdx.x;
    float X = 1.23;
    float Y = 2.34 ;
    B[i] = A[i]*X + Y;
}

int main(){
    float A[N], *A_d;
    float B[N], *B_d;
    int i;

    dim3 dimBlock(THREAD_X);
    dim3 dimGrid(N/THREAD_X);

    for(i = 0 ; i < N; i++){
        A[i] = i*2;
    }

    cudaMalloc((void**)&A_d, sizeof(int)*N);
    cudaMalloc((void**)&B_d, sizeof(int)*N);

    cudaMemcpy(A_d, A, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, sizeof(int)*N, cudaMemcpyHostToDevice);

    index<<<dimGrid, dimBlock>>>(A_d, B_d);

    cudaMemcpy(A, A_d, sizeof(int)*N, cudaMemcpyDeviceToHost);
    cudaMemcpy(B, B_d, sizeof(int)*N, cudaMemcpyDeviceToHost);

    for(i = 0; i < N; i++){
        printf("%f ",B[i]);
    }

    cudaFree(A_d);
}
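This pair shows the mechanical core of the hipify translation most plainly: cuda* runtime calls map one-to-one onto hip* calls, and the CUDA triple-chevron launch becomes an explicit hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...) call, which is the form the generated .hip files in this collection use. A minimal sketch for the launch above, assuming it is compiled as HIP; the wrapper name is illustrative and not part of the original files:

#include <hip/hip_runtime.h>

__global__ void index(float *A, float *B);   // the kernel defined in the pair above

// CUDA form: index<<<dimGrid, dimBlock>>>(A_d, B_d);
static inline void launch_index_hip(dim3 dimGrid, dim3 dimBlock, float *A_d, float *B_d)
{
    // HIP form: dynamic shared-memory bytes and stream become explicit arguments (0 = none / default stream)
    hipLaunchKernelGGL(index, dimGrid, dimBlock, 0, 0, A_d, B_d);
}

hip-clang also compiles the triple-chevron syntax directly; the explicit macro is simply what the hipify-generated files here emit.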
1bb3ae6b92a34f6470049322718c754e547a5da1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Part of the following code in this file refs to // https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu // // Copyright (c) 2017 Microsoft // Licensed under The Apache-2.0 License [see LICENSE for details] // \file deformable_psroi_pooling.cu // \brief // \author Yi Li, Guodong Zhang, Jifeng Dai #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/deformable_conv_filter.cu.h" #include "paddle/fluid/operators/deformable_conv_func.h" #include "paddle/fluid/operators/deformable_conv_v1_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; static constexpr int kNumCUDAThread = 512; static constexpr int kNumMaximumNumBlock = 4096; static inline int NumBlock(const int N) { return ::min((N + kNumCUDAThread - 1) / kNumCUDAThread, kNumMaximumNumBlock); } template <typename T> __global__ void DeformableCol2imCUDAKernel( const int nthreads, const T* data_col, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, T* grad_im) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t thread = index; thread < nthreads; thread += offset) { const int j = (thread / width_col / height_col / batch_size) % kernel_w; const int i = (thread / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = thread / width_col / height_col / batch_size / kernel_w / kernel_h; const int deformable_group_index = c / channel_per_deformable_group; int w_out = thread % width_col; int h_out = (thread / width_col) % height_col; int b = (thread / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; const T cur_inv_h_data = h_in + i * dilation_h + offset_h; const T cur_inv_w_data = w_in + j 
* dilation_w + offset_w; const T cur_top_grad = data_col[thread]; const int cur_h = static_cast<int>(cur_inv_h_data); const int cur_w = static_cast<int>(cur_inv_w_data); for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; T weight = DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename T> inline void DeformableCol2im(const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> pad, const std::vector<int> stride, const std::vector<int> dilation, const int deformable_group, T* grad_im) { int channel_per_deformable_group = im_shape[0] / deformable_group; int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; hipLaunchKernelGGL(( DeformableCol2imCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im); } template <typename T> __global__ void DeformableCol2imCoordCUDAKernel( const int nthreads, const T* data_col, const T* data_im, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, T* grad_offset) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { T val = 0, mval = 0; const int w = i % width_col; const int h = (i / width_col) % height_col; const int c = (i / width_col / height_col) % offset_channels; const int b = (i / width_col / height_col) / offset_channels; const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const T* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const T* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = offset_c / 2; col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / 
batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T inv_h = h_in + i * dilation_h + offset_h; T inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const T weight = DmcnGetCoordinateWeight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[i] = val; } } template <typename T> inline void DeformableCol2imCoord( const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* grad_offset) { int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] * col_shape[2] * col_shape[3] * deformable_groups; int channel_per_deformable_group = col_shape[0] / deformable_groups; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; hipLaunchKernelGGL(( DeformableCol2imCoordCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], 2 * kernel_shape[2] * kernel_shape[3] * deformable_groups, deformable_groups, col_shape[2], col_shape[3], grad_offset); } template <typename T> __global__ void DeformableIm2colCUDAKernel( const int nthreads, const T* data_im, const T* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, T* data_col) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { const int w_col = i % width_col; const int h_col = (i / width_col) % height_col; const int b_col = (i / width_col) / height_col % batch_size; const int c_im = (i / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; T* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; const T* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const T* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w 
* height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T val = static_cast<T>(0); const T h_im = h_in + i * dilation_h + offset_h; const T w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename T> inline void DeformableIm2col(const platform::CUDADeviceContext& ctx, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> filter_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* data_col) { int channel_per_deformable_group = im_shape[0] / deformable_groups; int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; // get outputs of im2col with offset by bilinear interpolation hipLaunchKernelGGL(( DeformableIm2colCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_im, data_offset, im_shape[1], im_shape[2], filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3], data_col); } template <typename T> class DeformableConvV1CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* input = ctx.Input<Tensor>("Input"); const Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); Tensor* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int groups = ctx.Attr<int>("groups"); const int deformable_groups = ctx.Attr<int>("deformable_groups"); const int im2col_step = ctx.Attr<int>("im2col_step"); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const int batch_size = static_cast<int>(input->dims()[0]); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims())); // col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w} std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim 
output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); int64_t M = output_shape_vec[1] / groups; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize( framework::make_ddim({groups, M, K})); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer) .Resize(framework::make_ddim({groups, K, N})); Tensor output_4d; output_4d.ShareDataWith(output_buffer) .Resize(framework::make_ddim({batch_size / im2col_step, groups, M, N})); output_4d.mutable_data<T>(ctx.GetPlace()); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); col_buffer.mutable_data<T>(ctx.GetPlace()); T* col_buffer_ptr = col_buffer.data<T>(); for (int i = 0; i < batch_size / im2col_step; ++i) { DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); Tensor output_3d = output_4d.Slice(i, i + 1).Resize( framework::slice_ddim(output_4d.dims(), 1, output_4d.dims().size())); // get the product of pixel and weight for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(framework::slice_ddim( output_3d.dims(), 1, output_3d.dims().size())); blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0), &output_3d_slice, T(0.0)); } } output->ShareDataWith(output_buffer) .Resize(framework::make_ddim(output_shape_vec)); } }; template <typename T> class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset")); const Tensor* input = ctx.Input<Tensor>("Input"); Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); if (!input_grad && !filter_grad && !offset_grad) return; int groups = ctx.Attr<int>("groups"); int deformable_groups = ctx.Attr<int>("deformable_groups"); int im2col_step = ctx.Attr<int>("im2col_step"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int batch_size = 
static_cast<int>(input->dims()[0]); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec( framework::vectorize(output_grad->dims())); std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); output_buffer.ShareDataWith(*output_grad); int64_t M = input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3]; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = output_shape_vec[1] / groups; framework::DDim weight_3d_shape = {groups, K, M}; framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K, N}; framework::DDim col_buffer_3d_shape = {groups, M, N}; framework::DDim filter_grad_shape = {groups, K, M}; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize(weight_3d_shape); Tensor out_grad_4d; out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape); math::SetConstant<CUDADeviceContext, T> set_zero; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); col_buffer.mutable_data<T>(ctx.GetPlace()); col_buffer_3d.mutable_data<T>(ctx.GetPlace()); out_grad_4d.mutable_data<T>(ctx.GetPlace()); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); filter_grad->Resize(filter_grad_shape); set_zero(dev_ctx, filter_grad, static_cast<T>(0)); } if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, input_grad, static_cast<T>(0)); } if (offset_grad) { offset_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, offset_grad, static_cast<T>(0)); } for (int i = 0; i < batch_size / im2col_step; ++i) { Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(framework::slice_ddim( out_grad_4d.dims(), 1, out_grad_4d.dims().size())); for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0), &col_buffer_3d_slice, T(0.0)); } col_buffer.Resize(col_shape); T* col_buffer_ptr = col_buffer.data<T>(); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); if (offset_grad) { T* 
offset_grad_ptr = offset_grad->data<T>(); // get grad of offset DeformableCol2imCoord( dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, offset_grad_ptr + i * im2col_step * input_offset_dim); } if (input_grad) { T* input_grad_ptr = input_grad->data<T>(); // get grad of input DeformableCol2im(dev_ctx, col_buffer_ptr, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, input_grad_ptr + i * im2col_step * input_dim); input_grad->Resize(input->dims()); } DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); col_buffer_3d.Resize(col_buffer_3d_shape); if (filter_grad) { Tensor dweight_3d; dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>( filter_grad_shape, dev_ctx); for (int g = 0; g < groups; ++g) { Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( dweight_3d.dims(), 1, dweight_3d.dims().size())); blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true, T(1.0), &dweight_3d_slice, T(0.0)); } hipLaunchKernelGGL(( FilterGradAddupCUDAKernel<T>), dim3(NumBlock(dweight_3d.numel())), dim3(kNumCUDAThread), 0, dev_ctx.stream(), dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(), filter_grad->data<T>()); } } if (filter_grad) { filter_grad->Resize(filter.dims()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(deformable_conv_v1, ops::DeformableConvV1CUDAKernel<float>, ops::DeformableConvV1CUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad, ops::DeformableConvV1GradCUDAKernel<float>, ops::DeformableConvV1GradCUDAKernel<double>);
1bb3ae6b92a34f6470049322718c754e547a5da1.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Part of the following code in this file refs to // https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu // // Copyright (c) 2017 Microsoft // Licensed under The Apache-2.0 License [see LICENSE for details] // \file deformable_psroi_pooling.cu // \brief // \author Yi Li, Guodong Zhang, Jifeng Dai #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/deformable_conv_filter.cu.h" #include "paddle/fluid/operators/deformable_conv_func.h" #include "paddle/fluid/operators/deformable_conv_v1_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; static constexpr int kNumCUDAThread = 512; static constexpr int kNumMaximumNumBlock = 4096; static inline int NumBlock(const int N) { return std::min((N + kNumCUDAThread - 1) / kNumCUDAThread, kNumMaximumNumBlock); } template <typename T> __global__ void DeformableCol2imCUDAKernel( const int nthreads, const T* data_col, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, T* grad_im) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t thread = index; thread < nthreads; thread += offset) { const int j = (thread / width_col / height_col / batch_size) % kernel_w; const int i = (thread / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = thread / width_col / height_col / batch_size / kernel_w / kernel_h; const int deformable_group_index = c / channel_per_deformable_group; int w_out = thread % width_col; int h_out = (thread / width_col) % height_col; int b = (thread / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; const T cur_inv_h_data = h_in + i * dilation_h + offset_h; const T cur_inv_w_data = w_in + j * dilation_w + offset_w; const T cur_top_grad = data_col[thread]; const int cur_h = 
static_cast<int>(cur_inv_h_data); const int cur_w = static_cast<int>(cur_inv_w_data); for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; T weight = DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename T> inline void DeformableCol2im(const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> pad, const std::vector<int> stride, const std::vector<int> dilation, const int deformable_group, T* grad_im) { int channel_per_deformable_group = im_shape[0] / deformable_group; int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; DeformableCol2imCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im); } template <typename T> __global__ void DeformableCol2imCoordCUDAKernel( const int nthreads, const T* data_col, const T* data_im, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, T* grad_offset) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { T val = 0, mval = 0; const int w = i % width_col; const int h = (i / width_col) % height_col; const int c = (i / width_col / height_col) % offset_channels; const int b = (i / width_col / height_col) / offset_channels; const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const T* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const T* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = offset_c / 2; col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; 
int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T inv_h = h_in + i * dilation_h + offset_h; T inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const T weight = DmcnGetCoordinateWeight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[i] = val; } } template <typename T> inline void DeformableCol2imCoord( const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* grad_offset) { int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] * col_shape[2] * col_shape[3] * deformable_groups; int channel_per_deformable_group = col_shape[0] / deformable_groups; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; DeformableCol2imCoordCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], 2 * kernel_shape[2] * kernel_shape[3] * deformable_groups, deformable_groups, col_shape[2], col_shape[3], grad_offset); } template <typename T> __global__ void DeformableIm2colCUDAKernel( const int nthreads, const T* data_im, const T* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, T* data_col) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { const int w_col = i % width_col; const int h_col = (i / width_col) % height_col; const int b_col = (i / width_col) / height_col % batch_size; const int c_im = (i / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; T* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; const T* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const T* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * 
kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T val = static_cast<T>(0); const T h_im = h_in + i * dilation_h + offset_h; const T w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename T> inline void DeformableIm2col(const platform::CUDADeviceContext& ctx, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> filter_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* data_col) { int channel_per_deformable_group = im_shape[0] / deformable_groups; int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; // get outputs of im2col with offset by bilinear interpolation DeformableIm2colCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_im, data_offset, im_shape[1], im_shape[2], filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3], data_col); } template <typename T> class DeformableConvV1CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* input = ctx.Input<Tensor>("Input"); const Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); Tensor* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int groups = ctx.Attr<int>("groups"); const int deformable_groups = ctx.Attr<int>("deformable_groups"); const int im2col_step = ctx.Attr<int>("im2col_step"); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const int batch_size = static_cast<int>(input->dims()[0]); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims())); // col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w} std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); 
output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); int64_t M = output_shape_vec[1] / groups; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize( framework::make_ddim({groups, M, K})); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer) .Resize(framework::make_ddim({groups, K, N})); Tensor output_4d; output_4d.ShareDataWith(output_buffer) .Resize(framework::make_ddim({batch_size / im2col_step, groups, M, N})); output_4d.mutable_data<T>(ctx.GetPlace()); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); col_buffer.mutable_data<T>(ctx.GetPlace()); T* col_buffer_ptr = col_buffer.data<T>(); for (int i = 0; i < batch_size / im2col_step; ++i) { DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); Tensor output_3d = output_4d.Slice(i, i + 1).Resize( framework::slice_ddim(output_4d.dims(), 1, output_4d.dims().size())); // get the product of pixel and weight for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(framework::slice_ddim( output_3d.dims(), 1, output_3d.dims().size())); blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0), &output_3d_slice, T(0.0)); } } output->ShareDataWith(output_buffer) .Resize(framework::make_ddim(output_shape_vec)); } }; template <typename T> class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset")); const Tensor* input = ctx.Input<Tensor>("Input"); Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); if (!input_grad && !filter_grad && !offset_grad) return; int groups = ctx.Attr<int>("groups"); int deformable_groups = ctx.Attr<int>("deformable_groups"); int im2col_step = ctx.Attr<int>("im2col_step"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int batch_size = static_cast<int>(input->dims()[0]); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = 
framework::vectorize(input_shape); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec( framework::vectorize(output_grad->dims())); std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); output_buffer.ShareDataWith(*output_grad); int64_t M = input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3]; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = output_shape_vec[1] / groups; framework::DDim weight_3d_shape = {groups, K, M}; framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K, N}; framework::DDim col_buffer_3d_shape = {groups, M, N}; framework::DDim filter_grad_shape = {groups, K, M}; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize(weight_3d_shape); Tensor out_grad_4d; out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape); math::SetConstant<CUDADeviceContext, T> set_zero; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); col_buffer.mutable_data<T>(ctx.GetPlace()); col_buffer_3d.mutable_data<T>(ctx.GetPlace()); out_grad_4d.mutable_data<T>(ctx.GetPlace()); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); filter_grad->Resize(filter_grad_shape); set_zero(dev_ctx, filter_grad, static_cast<T>(0)); } if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, input_grad, static_cast<T>(0)); } if (offset_grad) { offset_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, offset_grad, static_cast<T>(0)); } for (int i = 0; i < batch_size / im2col_step; ++i) { Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(framework::slice_ddim( out_grad_4d.dims(), 1, out_grad_4d.dims().size())); for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0), &col_buffer_3d_slice, T(0.0)); } col_buffer.Resize(col_shape); T* col_buffer_ptr = col_buffer.data<T>(); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); if (offset_grad) { T* offset_grad_ptr = offset_grad->data<T>(); // get grad of offset DeformableCol2imCoord( dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim, offset_ptr + i * 
im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, offset_grad_ptr + i * im2col_step * input_offset_dim); } if (input_grad) { T* input_grad_ptr = input_grad->data<T>(); // get grad of input DeformableCol2im(dev_ctx, col_buffer_ptr, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, input_grad_ptr + i * im2col_step * input_dim); input_grad->Resize(input->dims()); } DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); col_buffer_3d.Resize(col_buffer_3d_shape); if (filter_grad) { Tensor dweight_3d; dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>( filter_grad_shape, dev_ctx); for (int g = 0; g < groups; ++g) { Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( dweight_3d.dims(), 1, dweight_3d.dims().size())); blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true, T(1.0), &dweight_3d_slice, T(0.0)); } FilterGradAddupCUDAKernel<T><<<NumBlock(dweight_3d.numel()), kNumCUDAThread, 0, dev_ctx.stream()>>>( dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(), filter_grad->data<T>()); } } if (filter_grad) { filter_grad->Resize(filter.dims()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(deformable_conv_v1, ops::DeformableConvV1CUDAKernel<float>, ops::DeformableConvV1CUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad, ops::DeformableConvV1GradCUDAKernel<float>, ops::DeformableConvV1GradCUDAKernel<double>);
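The forward and backward kernels above all sample the input feature map at fractional, offset-shifted positions via DmcnIm2colBilinear and spread gradients back with DmcnGetGradientWeight / DmcnGetCoordinateWeight; those helpers come from the included deformable_conv_func.h and are not shown in this file. As an illustration only, here is a minimal sketch of the usual bilinear-sampling logic behind such a helper (BilinearSampleSketch is a made-up name and the real helper's signature differs):

// Stand-in for the bilinear sampler used by the deformable-conv kernels above;
// a sketch, not the Paddle implementation. Out-of-range taps contribute zero.
#include <cuda_runtime.h>
#include <math.h>

__device__ float BilinearSampleSketch(const float* data, int height, int width,
                                      float h, float w) {
  // Integer corners surrounding the fractional coordinate (h, w).
  int h_low = static_cast<int>(floorf(h));
  int w_low = static_cast<int>(floorf(w));
  int h_high = h_low + 1;
  int w_high = w_low + 1;

  float lh = h - h_low, lw = w - w_low;  // fractional parts
  float hh = 1.0f - lh, hw = 1.0f - lw;

  // Fetch the four neighbours, treating taps outside the image as zero.
  float v1 = (h_low >= 0 && w_low >= 0) ? data[h_low * width + w_low] : 0.0f;
  float v2 = (h_low >= 0 && w_high < width) ? data[h_low * width + w_high] : 0.0f;
  float v3 = (h_high < height && w_low >= 0) ? data[h_high * width + w_low] : 0.0f;
  float v4 = (h_high < height && w_high < width) ? data[h_high * width + w_high] : 0.0f;

  // Bilinear weights are products of the complementary fractional parts.
  return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}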
f35efe4c2d69dd8d7ee13ea98bcdbeefbfb6a6e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA

#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"

namespace dragon {

namespace kernel {

/*! MixedPrecisionL2Decay <T = float16, Device = CUDA> */

__global__ void _MixedPrecisionL2DecayHalf(
    const int count,
    const float alpha,
    const half* w,
    float* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
#if __CUDA_ARCH__ >= 530
        dx[i] += (__half2float(w[i]) * alpha);
#endif
    }
}

template <> void MixedPrecisionL2Decay<float16, CUDAContext>(
    const int count,
    const float alpha,
    const float16* w,
    float* dx,
    CUDAContext* ctx) {
    _MixedPrecisionL2DecayHalf << < CUDA_BLOCKS(count), CUDA_THREADS,
        0, ctx->cuda_stream() >> > (count, alpha,
            reinterpret_cast<const half*>(w), dx);
}

/*! MixedPrecisionUpdate <T = float16, Device = CUDA> */

__global__ void _MixedPrecisionUpdateHalf(
    const int count,
    const float* updates,
    half* w) {
    CUDA_1D_KERNEL_LOOP(i, count) {
#if __CUDA_ARCH__ >= 530
        w[i] = __float2half(__half2float(w[i]) - updates[i]);
#endif
    }
}

template <> void MixedPrecisionUpdate<float16, CUDAContext>(
    const int count,
    const float* updates,
    float16* w,
    CUDAContext* ctx) {
    _MixedPrecisionUpdateHalf << < CUDA_BLOCKS(count), CUDA_THREADS,
        0, ctx->cuda_stream() >> > (count, updates,
            reinterpret_cast<half*>(w));
}

}  // namespace kernel

}  // namespace dragon

#endif  // WITH_CUDA
f35efe4c2d69dd8d7ee13ea98bcdbeefbfb6a6e1.cu
#ifdef WITH_CUDA

#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"

namespace dragon {

namespace kernel {

/*! MixedPrecisionL2Decay <T = float16, Device = CUDA> */

__global__ void _MixedPrecisionL2DecayHalf(
    const int count,
    const float alpha,
    const half* w,
    float* dx) {
    CUDA_1D_KERNEL_LOOP(i, count) {
#if __CUDA_ARCH__ >= 530
        dx[i] += (__half2float(w[i]) * alpha);
#endif
    }
}

template <> void MixedPrecisionL2Decay<float16, CUDAContext>(
    const int count,
    const float alpha,
    const float16* w,
    float* dx,
    CUDAContext* ctx) {
    _MixedPrecisionL2DecayHalf << < CUDA_BLOCKS(count), CUDA_THREADS,
        0, ctx->cuda_stream() >> > (count, alpha,
            reinterpret_cast<const half*>(w), dx);
}

/*! MixedPrecisionUpdate <T = float16, Device = CUDA> */

__global__ void _MixedPrecisionUpdateHalf(
    const int count,
    const float* updates,
    half* w) {
    CUDA_1D_KERNEL_LOOP(i, count) {
#if __CUDA_ARCH__ >= 530
        w[i] = __float2half(__half2float(w[i]) - updates[i]);
#endif
    }
}

template <> void MixedPrecisionUpdate<float16, CUDAContext>(
    const int count,
    const float* updates,
    float16* w,
    CUDAContext* ctx) {
    _MixedPrecisionUpdateHalf << < CUDA_BLOCKS(count), CUDA_THREADS,
        0, ctx->cuda_stream() >> > (count, updates,
            reinterpret_cast<half*>(w));
}

}  // namespace kernel

}  // namespace dragon

#endif  // WITH_CUDA
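Both copies of this file keep the weights in half precision while the L2-decay term and the update itself are accumulated in float. Below is a self-contained sketch of the same update pattern outside the Dragon framework, for illustration: CUDA_1D_KERNEL_LOOP, CUDA_BLOCKS and CUDA_THREADS are Dragon macros, so the grid-stride loop and the launch numbers here are assumptions rather than the library's actual configuration.

// Minimal stand-alone version of the mixed-precision update pattern above,
// using plain CUDA. The original kernels guard the body with
// __CUDA_ARCH__ >= 530; here the half<->float conversion intrinsics are used directly.
#include <cuda_fp16.h>

__global__ void MixedPrecisionUpdateSketch(int count, const float* updates, half* w) {
  // Grid-stride loop, playing the role of CUDA_1D_KERNEL_LOOP(i, count).
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count;
       i += blockDim.x * gridDim.x) {
    // Read the half weight, apply the float update, store back as half.
    w[i] = __float2half(__half2float(w[i]) - updates[i]);
  }
}

// Example launch (block/grid sizes are arbitrary choices for the sketch):
//   int threads = 256;
//   int blocks  = (count + threads - 1) / threads;
//   MixedPrecisionUpdateSketch<<<blocks, threads, 0, stream>>>(count, updates, w);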
7646bda32fbf1be14a160ac23db6057df047ab39.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "rocblas.h"

using namespace std;

#define CUDA_CALL(F, ...)\
  if((F(__VA_ARGS__)) != hipSuccess){\
    hipError_t e = hipGetLastError();\
    printf("CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e));\
    return(EXIT_FAILURE);\
  }

const int N = 1 << 10;

int main(){
    float *a_h, *b_h;
    a_h = new float[N];
    b_h = new float[N];
    float *a_d, *b_d;

    for(int i = 0; i < N; i++){
        a_h[i] = (b_h[i] = 1.0f * i);
    }

    hipblasHandle_t handle;
    CUDA_CALL(hipblasCreate, &handle);

    CUDA_CALL(hipMalloc, (void**) &a_d, sizeof(float) * N);
    CUDA_CALL(hipMalloc, (void**) &b_d, sizeof(float) * N);

    CUDA_CALL(hipblasSetVector, N, sizeof(float), a_h, 1, a_d, 1);
    CUDA_CALL(hipblasSetVector, N, sizeof(float), b_h, 1, b_d, 1);

    const float s = 2.0f;
    CUDA_CALL(hipblasSaxpy, handle, N, &s, a_d, 1, b_d, 1);

    CUDA_CALL(hipblasGetVector, N, sizeof(float), b_d, 1, b_h, 1);

    CUDA_CALL(hipFree, a_d);
    CUDA_CALL(hipFree, b_d);
    CUDA_CALL(hipblasDestroy, handle);

    for(int i = 0; i < N; i++)
        cout << "b_h[" << i << "] = " << b_h[i] << endl;

    delete[] a_h;
    delete[] b_h;
    return 0;
}
7646bda32fbf1be14a160ac23db6057df047ab39.cu
#include <iostream>
#include "cublas_v2.h"

using namespace std;

#define CUDA_CALL(F, ...)\
  if((F(__VA_ARGS__)) != cudaSuccess){\
    cudaError_t e = cudaGetLastError();\
    printf("CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));\
    return(EXIT_FAILURE);\
  }

const int N = 1 << 10;

int main(){
    float *a_h, *b_h;
    a_h = new float[N];
    b_h = new float[N];
    float *a_d, *b_d;

    for(int i = 0; i < N; i++){
        a_h[i] = (b_h[i] = 1.0f * i);
    }

    cublasHandle_t handle;
    CUDA_CALL(cublasCreate, &handle);

    CUDA_CALL(cudaMalloc, (void**) &a_d, sizeof(float) * N);
    CUDA_CALL(cudaMalloc, (void**) &b_d, sizeof(float) * N);

    CUDA_CALL(cublasSetVector, N, sizeof(float), a_h, 1, a_d, 1);
    CUDA_CALL(cublasSetVector, N, sizeof(float), b_h, 1, b_d, 1);

    const float s = 2.0f;
    CUDA_CALL(cublasSaxpy, handle, N, &s, a_d, 1, b_d, 1);

    CUDA_CALL(cublasGetVector, N, sizeof(float), b_d, 1, b_h, 1);

    CUDA_CALL(cudaFree, a_d);
    CUDA_CALL(cudaFree, b_d);
    CUDA_CALL(cublasDestroy, handle);

    for(int i = 0; i < N; i++)
        cout << "b_h[" << i << "] = " << b_h[i] << endl;

    delete[] a_h;
    delete[] b_h;
    return 0;
}
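A note on both versions of this SAXPY example: the CUDA_CALL macro checks every call, BLAS or runtime, against a single success constant and reports cudaGetLastError()/hipGetLastError() on failure. That only works because the BLAS success code and the runtime success code are both numerically zero, and the printed message will not describe a failed BLAS call; the hipified copy above also includes rocblas.h while calling hipblas* functions, which are normally declared in hipblas.h. Below is a sketch that keeps the two error domains separate; CHECK_CUDA and CHECK_CUBLAS are illustrative names, not part of the original.

// Separate error checks for the CUDA runtime and cuBLAS status domains.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include "cublas_v2.h"

#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      printf("CUDA failure %s:%d: '%s'\n", __FILE__, __LINE__,        \
             cudaGetErrorString(err_));                               \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

#define CHECK_CUBLAS(call)                                            \
  do {                                                                \
    cublasStatus_t st_ = (call);                                      \
    if (st_ != CUBLAS_STATUS_SUCCESS) {                               \
      printf("cuBLAS failure %s:%d: status %d\n", __FILE__, __LINE__, \
             static_cast<int>(st_));                                  \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Usage, mirroring the calls in the example above:
//   CHECK_CUBLAS(cublasCreate(&handle));
//   CHECK_CUDA(cudaMalloc((void**)&a_d, sizeof(float) * N));
//   CHECK_CUBLAS(cublasSaxpy(handle, N, &s, a_d, 1, b_d, 1));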
30bc96415e9709e72d1ecd7fed846e2f17f8a426.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ftl/cuda_common.hpp> #include <ftl/rgbd/camera.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <ftl/operators/cuda/disparity.hpp> #include <ftl/operators/cuda/mask.hpp> #include <ftl/cuda/fixed.hpp> #ifndef PINF #define PINF __int_as_float(0x7f800000) #endif template<typename T_in, typename T_out> __global__ void d2d_kernel(cv::cuda::PtrStepSz<T_in> disp, cv::cuda::PtrStepSz<T_out> depth, const ftl::rgbd::Camera cam, const float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short d = disp(v,u); depth(v,u) = (d == 0) ? 0.0f : ((cam.baseline*cam.fx) / ((float(d)*scale) - cam.doffs)); } } } namespace ftl { namespace cuda { template<typename T_in, typename T_out> void disparity_to_depth(const cv::cuda::GpuMat &disparity, cv::cuda::GpuMat &depth, const ftl::rgbd::Camera &c, float scale, hipStream_t &stream) { dim3 grid(1,1,1); dim3 threads(128, 1, 1); grid.x = cv::cuda::device::divUp(disparity.cols, 128); grid.y = cv::cuda::device::divUp(disparity.rows, 1); hipLaunchKernelGGL(( d2d_kernel<T_in, T_out>), dim3(grid), dim3(threads), 0, stream, disparity, depth, c, scale); cudaSafeCall( hipGetLastError() ); } template void disparity_to_depth<short, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, hipStream_t&); template void disparity_to_depth<float, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, hipStream_t&); } } //============================================================================== template<typename T_in, typename T_out> __global__ void d2drev_kernel(cv::cuda::PtrStepSz<T_in> disp, cv::cuda::PtrStepSz<T_out> depth, const ftl::rgbd::Camera cam, const float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { float d = depth(v,u); float disparity = (d > cam.maxDepth || d < cam.minDepth) ? 0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs; disp(v,u) = T_out(disparity*scale); }} } namespace ftl { namespace cuda { template<typename T_in, typename T_out> void depth_to_disparity(const cv::cuda::GpuMat &depth, cv::cuda::GpuMat &disparity, const ftl::rgbd::Camera &c, float scale, hipStream_t &stream) { dim3 grid(1,1,1); dim3 threads(128, 1, 1); grid.x = cv::cuda::device::divUp(disparity.cols, 128); grid.y = cv::cuda::device::divUp(disparity.rows, 1); hipLaunchKernelGGL(( d2drev_kernel<T_in, T_out>), dim3(grid), dim3(threads), 0, stream, disparity, depth, c, scale); cudaSafeCall( hipGetLastError() ); } template void depth_to_disparity<float, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, hipStream_t&); template void depth_to_disparity<float, short>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, hipStream_t&); } } // ============================================================================= __global__ void remove_occ_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<float> depthR, ftl::rgbd::Camera cam) { for (STRIDE_Y(v,depth.rows)) { for (STRIDE_X(u,depth.cols)) { float d = depth(v,u); int disparity = int((d > cam.maxDepth || d < cam.minDepth) ? 
0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs); if (disparity > 0 && u-disparity > 0) { float dR = depthR(v,u-disparity); if (fabsf(d-dR) > 0.01f*d) { depth(v,u) = 0.0f; } } } } } void ftl::cuda::remove_occlusions(cv::cuda::GpuMat &depth, const cv::cuda::GpuMat &depthR, const ftl::rgbd::Camera &c, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(depth.cols, 128); grid.y = cv::cuda::device::divUp(depth.rows, 4); hipLaunchKernelGGL(( remove_occ_kernel), dim3(grid), dim3(threads), 0, stream, depth, depthR, c); cudaSafeCall( hipGetLastError() ); } __global__ void mask_occ_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<float> depthR, cv::cuda::PtrStepSz<uchar> mask, ftl::rgbd::Camera cam) { for (STRIDE_Y(v,depth.rows)) { for (STRIDE_X(u,depth.cols)) { float d = depth(v,u); int disparity = int((d > cam.maxDepth || d < cam.minDepth) ? 0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs); if (disparity > 0 && u-disparity > 0) { float dR = depthR(v,u-disparity); if (fabsf(d-dR) > 0.01f*d) { mask(v,u) = mask(v,u) | ftl::cuda::Mask::kMask_Occlusion; } } } } } void ftl::cuda::mask_occlusions(const cv::cuda::GpuMat &depth, const cv::cuda::GpuMat &depthR, cv::cuda::GpuMat &mask, const ftl::rgbd::Camera &c, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(depth.cols, 128); grid.y = cv::cuda::device::divUp(depth.rows, 4); hipLaunchKernelGGL(( mask_occ_kernel), dim3(grid), dim3(threads), 0, stream, depth, depthR, mask, c); cudaSafeCall( hipGetLastError() ); } // ============================================================================= __global__ void check_reprojection_kernel(cv::cuda::PtrStepSz<short> disp, ftl::cuda::TextureObject<uchar4> left, ftl::cuda::TextureObject<uchar4> right) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { const float d = float(disp(v,u)) / 16.0f; const float4 l = left.tex2D(float(u)+0.5f, float(v)+0.5f); if (d > 0) { const float4 r = right.tex2D(float(u-d)+0.5f, float(v)+0.5f); const float diff = max(fabsf(l.x-r.x),max(fabsf(l.y-r.y), fabsf(l.z-r.z))); if (diff > 10.0f) disp(v,u) = 0; } } } } void ftl::cuda::check_reprojection(const cv::cuda::GpuMat &disp, const ftl::cuda::TextureObject<uchar4> &left, const ftl::cuda::TextureObject<uchar4> &right, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); hipLaunchKernelGGL(( check_reprojection_kernel), dim3(grid), dim3(threads), 0, stream, disp, left, right); cudaSafeCall( hipGetLastError() ); } // ============================================================================= __global__ void show_rpe_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<uchar4> left, cv::cuda::PtrStepSz<uchar4> right, float scale) { for (STRIDE_Y(v,left.rows)) { for (STRIDE_X(u,left.cols)) { short d = disp(v,u) / 16; if (d > 0 && u-d >= 0) { uchar4 l = left(v,u); uchar4 r = right(v,u-d); float d = max(abs(int(l.x)-int(r.x)),max(abs(int(l.y)-int(r.y)), abs(int(l.z)-int(r.z)))); left(v,u) = make_uchar4(0,0,min(255.0f, (d/scale) * 255.0f),255); } } } } void ftl::cuda::show_rpe(const cv::cuda::GpuMat &disp, cv::cuda::GpuMat &left, const cv::cuda::GpuMat &right, float scale, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); hipLaunchKernelGGL(( show_rpe_kernel), dim3(grid), dim3(threads), 0, stream, disp, 
left, right, scale); cudaSafeCall( hipGetLastError() ); } // ============================================================================= __global__ void merge_disp_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> estimate) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short cd = disp(v,u); float d = fixed2float<4>((cd >= 4096) ? 0 : cd); float e = fixed2float<4>(estimate(v,u)); if (e == 0.0f) d = 0.0f; if (fabsf(d-e) > 4.0f) d = 0.0f; disp(v,u) = float2fixed<4>(d); } } } void ftl::cuda::merge_disparities(cv::cuda::GpuMat &disp, const cv::cuda::GpuMat &estimate, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); hipLaunchKernelGGL(( merge_disp_kernel), dim3(grid), dim3(threads), 0, stream, disp, estimate); cudaSafeCall( hipGetLastError() ); } // ============================================================================= template <int MAX_DISP> __global__ void show_disp_density_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<uchar4> left, float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short d = disp(v,u) / 16; int count = 0; for (int i=1; i<MAX_DISP; ++i) { if (u+i-d < disp.cols && u+i-d >= 0) { short dd = disp(v,u+i-d) / 16; if (d > 0 && dd == i) ++count; } } count = max(0,count-1); left(v,u) = make_uchar4(0,0,min(255.0f, (float(count)/4.0f) * 255.0f),255); } } } void ftl::cuda::show_disp_density(const cv::cuda::GpuMat &disp, cv::cuda::GpuMat &left, float scale, hipStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); hipLaunchKernelGGL(( show_disp_density_kernel<256>), dim3(grid), dim3(threads), 0, stream, disp, left, scale); cudaSafeCall( hipGetLastError() ); }
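The file above is the hipify output of the .cu file that follows: every triple-chevron launch becomes a hipLaunchKernelGGL call, and cudaStream_t / cudaGetLastError become their HIP equivalents. A toy illustration of that rewrite, since only the launch syntax really changes (scale_kernel and launch_scale are made-up names; the 128-wide block mirrors the configuration used in this file):

// Toy example of the launch-syntax rewrite hipify applies throughout this file.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launch_scale(float* data, int n, float s, hipStream_t stream) {
  dim3 threads(128, 1, 1);
  dim3 grid((n + 127) / 128, 1, 1);
  // CUDA source form (as in the .cu file below):
  //   scale_kernel<<<grid, threads, 0, stream>>>(data, n, s);
  // hipify output: kernel first, then grid, block, dynamic shared-memory bytes,
  // stream, and finally the kernel arguments.
  hipLaunchKernelGGL(scale_kernel, grid, threads, 0, stream, data, n, s);
}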
30bc96415e9709e72d1ecd7fed846e2f17f8a426.cu
#include <ftl/cuda_common.hpp> #include <ftl/rgbd/camera.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <ftl/operators/cuda/disparity.hpp> #include <ftl/operators/cuda/mask.hpp> #include <ftl/cuda/fixed.hpp> #ifndef PINF #define PINF __int_as_float(0x7f800000) #endif template<typename T_in, typename T_out> __global__ void d2d_kernel(cv::cuda::PtrStepSz<T_in> disp, cv::cuda::PtrStepSz<T_out> depth, const ftl::rgbd::Camera cam, const float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short d = disp(v,u); depth(v,u) = (d == 0) ? 0.0f : ((cam.baseline*cam.fx) / ((float(d)*scale) - cam.doffs)); } } } namespace ftl { namespace cuda { template<typename T_in, typename T_out> void disparity_to_depth(const cv::cuda::GpuMat &disparity, cv::cuda::GpuMat &depth, const ftl::rgbd::Camera &c, float scale, cudaStream_t &stream) { dim3 grid(1,1,1); dim3 threads(128, 1, 1); grid.x = cv::cuda::device::divUp(disparity.cols, 128); grid.y = cv::cuda::device::divUp(disparity.rows, 1); d2d_kernel<T_in, T_out><<<grid, threads, 0, stream>>>( disparity, depth, c, scale); cudaSafeCall( cudaGetLastError() ); } template void disparity_to_depth<short, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, cudaStream_t&); template void disparity_to_depth<float, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, cudaStream_t&); } } //============================================================================== template<typename T_in, typename T_out> __global__ void d2drev_kernel(cv::cuda::PtrStepSz<T_in> disp, cv::cuda::PtrStepSz<T_out> depth, const ftl::rgbd::Camera cam, const float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { float d = depth(v,u); float disparity = (d > cam.maxDepth || d < cam.minDepth) ? 0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs; disp(v,u) = T_out(disparity*scale); }} } namespace ftl { namespace cuda { template<typename T_in, typename T_out> void depth_to_disparity(const cv::cuda::GpuMat &depth, cv::cuda::GpuMat &disparity, const ftl::rgbd::Camera &c, float scale, cudaStream_t &stream) { dim3 grid(1,1,1); dim3 threads(128, 1, 1); grid.x = cv::cuda::device::divUp(disparity.cols, 128); grid.y = cv::cuda::device::divUp(disparity.rows, 1); d2drev_kernel<T_in, T_out><<<grid, threads, 0, stream>>>( disparity, depth, c, scale); cudaSafeCall( cudaGetLastError() ); } template void depth_to_disparity<float, float>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, cudaStream_t&); template void depth_to_disparity<float, short>(const cv::cuda::GpuMat&, cv::cuda::GpuMat&, const ftl::rgbd::Camera&, float, cudaStream_t&); } } // ============================================================================= __global__ void remove_occ_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<float> depthR, ftl::rgbd::Camera cam) { for (STRIDE_Y(v,depth.rows)) { for (STRIDE_X(u,depth.cols)) { float d = depth(v,u); int disparity = int((d > cam.maxDepth || d < cam.minDepth) ? 
0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs); if (disparity > 0 && u-disparity > 0) { float dR = depthR(v,u-disparity); if (fabsf(d-dR) > 0.01f*d) { depth(v,u) = 0.0f; } } } } } void ftl::cuda::remove_occlusions(cv::cuda::GpuMat &depth, const cv::cuda::GpuMat &depthR, const ftl::rgbd::Camera &c, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(depth.cols, 128); grid.y = cv::cuda::device::divUp(depth.rows, 4); remove_occ_kernel<<<grid, threads, 0, stream>>>( depth, depthR, c); cudaSafeCall( cudaGetLastError() ); } __global__ void mask_occ_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<float> depthR, cv::cuda::PtrStepSz<uchar> mask, ftl::rgbd::Camera cam) { for (STRIDE_Y(v,depth.rows)) { for (STRIDE_X(u,depth.cols)) { float d = depth(v,u); int disparity = int((d > cam.maxDepth || d < cam.minDepth) ? 0.0f : ((cam.baseline*cam.fx) / d) + cam.doffs); if (disparity > 0 && u-disparity > 0) { float dR = depthR(v,u-disparity); if (fabsf(d-dR) > 0.01f*d) { mask(v,u) = mask(v,u) | ftl::cuda::Mask::kMask_Occlusion; } } } } } void ftl::cuda::mask_occlusions(const cv::cuda::GpuMat &depth, const cv::cuda::GpuMat &depthR, cv::cuda::GpuMat &mask, const ftl::rgbd::Camera &c, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(depth.cols, 128); grid.y = cv::cuda::device::divUp(depth.rows, 4); mask_occ_kernel<<<grid, threads, 0, stream>>>( depth, depthR, mask, c); cudaSafeCall( cudaGetLastError() ); } // ============================================================================= __global__ void check_reprojection_kernel(cv::cuda::PtrStepSz<short> disp, ftl::cuda::TextureObject<uchar4> left, ftl::cuda::TextureObject<uchar4> right) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { const float d = float(disp(v,u)) / 16.0f; const float4 l = left.tex2D(float(u)+0.5f, float(v)+0.5f); if (d > 0) { const float4 r = right.tex2D(float(u-d)+0.5f, float(v)+0.5f); const float diff = max(fabsf(l.x-r.x),max(fabsf(l.y-r.y), fabsf(l.z-r.z))); if (diff > 10.0f) disp(v,u) = 0; } } } } void ftl::cuda::check_reprojection(const cv::cuda::GpuMat &disp, const ftl::cuda::TextureObject<uchar4> &left, const ftl::cuda::TextureObject<uchar4> &right, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); check_reprojection_kernel<<<grid, threads, 0, stream>>>(disp, left, right); cudaSafeCall( cudaGetLastError() ); } // ============================================================================= __global__ void show_rpe_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<uchar4> left, cv::cuda::PtrStepSz<uchar4> right, float scale) { for (STRIDE_Y(v,left.rows)) { for (STRIDE_X(u,left.cols)) { short d = disp(v,u) / 16; if (d > 0 && u-d >= 0) { uchar4 l = left(v,u); uchar4 r = right(v,u-d); float d = max(abs(int(l.x)-int(r.x)),max(abs(int(l.y)-int(r.y)), abs(int(l.z)-int(r.z)))); left(v,u) = make_uchar4(0,0,min(255.0f, (d/scale) * 255.0f),255); } } } } void ftl::cuda::show_rpe(const cv::cuda::GpuMat &disp, cv::cuda::GpuMat &left, const cv::cuda::GpuMat &right, float scale, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); show_rpe_kernel<<<grid, threads, 0, stream>>>( disp, left, right, scale); cudaSafeCall( cudaGetLastError() ); } // 
============================================================================= __global__ void merge_disp_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<short> estimate) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short cd = disp(v,u); float d = fixed2float<4>((cd >= 4096) ? 0 : cd); float e = fixed2float<4>(estimate(v,u)); if (e == 0.0f) d = 0.0f; if (fabsf(d-e) > 4.0f) d = 0.0f; disp(v,u) = float2fixed<4>(d); } } } void ftl::cuda::merge_disparities(cv::cuda::GpuMat &disp, const cv::cuda::GpuMat &estimate, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); merge_disp_kernel<<<grid, threads, 0, stream>>>(disp, estimate); cudaSafeCall( cudaGetLastError() ); } // ============================================================================= template <int MAX_DISP> __global__ void show_disp_density_kernel(cv::cuda::PtrStepSz<short> disp, cv::cuda::PtrStepSz<uchar4> left, float scale) { for (STRIDE_Y(v,disp.rows)) { for (STRIDE_X(u,disp.cols)) { short d = disp(v,u) / 16; int count = 0; for (int i=1; i<MAX_DISP; ++i) { if (u+i-d < disp.cols && u+i-d >= 0) { short dd = disp(v,u+i-d) / 16; if (d > 0 && dd == i) ++count; } } count = max(0,count-1); left(v,u) = make_uchar4(0,0,min(255.0f, (float(count)/4.0f) * 255.0f),255); } } } void ftl::cuda::show_disp_density(const cv::cuda::GpuMat &disp, cv::cuda::GpuMat &left, float scale, cudaStream_t stream) { dim3 grid(1,1,1); dim3 threads(128, 4, 1); grid.x = cv::cuda::device::divUp(disp.cols, 128); grid.y = cv::cuda::device::divUp(disp.rows, 4); show_disp_density_kernel<256><<<grid, threads, 0, stream>>>( disp, left, scale); cudaSafeCall( cudaGetLastError() ); }
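Both the HIP and CUDA versions of this operator convert between disparity and depth with the pinhole-stereo relation depth = baseline * fx / (disparity * scale - doffs), and d2drev_kernel applies the inverse. A host-side reference of the same arithmetic can be handy for sanity-checking kernel output; CamParams below is a stand-in for the few ftl::rgbd::Camera fields the kernels read, not the real type.

// Host-side reference for the disparity <-> depth conversion used by
// d2d_kernel / d2drev_kernel above. CamParams is a hypothetical stand-in
// for the relevant ftl::rgbd::Camera fields.
struct CamParams {
  float baseline;            // stereo baseline
  float fx;                  // focal length in pixels
  float doffs;               // disparity offset
  float minDepth, maxDepth;  // valid depth range
};

inline float DisparityToDepth(float d, const CamParams& c, float scale = 1.0f) {
  // Matches d2d_kernel: zero disparity maps to zero depth.
  return (d == 0.0f) ? 0.0f : (c.baseline * c.fx) / (d * scale - c.doffs);
}

inline float DepthToDisparity(float depth, const CamParams& c, float scale = 1.0f) {
  // Matches d2drev_kernel: out-of-range depths map to zero disparity.
  if (depth > c.maxDepth || depth < c.minDepth) return 0.0f;
  return ((c.baseline * c.fx) / depth + c.doffs) * scale;
}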