Columns (string length, min to max):
    hip_filename     string, 5 to 84
    hip_content      string, 79 to 9.69M
    cuda_filename    string, 4 to 83
    cuda_content     string, 19 to 9.69M
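Each row pairs a hipify-generated HIP source file with the CUDA source it mirrors, as the entries below show. A minimal sketch of how one record could be represented in memory, assuming only the four string columns listed above; the struct name HipifyPair and this C++ rendering are illustrative, not part of the dataset:

#include <string>

// Hypothetical in-memory view of one dataset row; field names mirror the
// column names listed above, comments repeat the length ranges.
struct HipifyPair {
    std::string hip_filename;   // 5 to 84 characters
    std::string hip_content;    // 79 characters to 9.69M, hipify output
    std::string cuda_filename;  // 4 to 83 characters
    std::string cuda_content;   // 19 characters to 9.69M, original CUDA source
};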
120f7988fe2ecb9137c6caa31fb9f7398a287b8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/BatchNormalization.cu" #else #define DeviceTensor3 THCDeviceTensor<real, 3> #define DeviceTensor1 THCDeviceTensor<real, 1> template <int Dim> static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) { if (!t) { return THCDeviceTensor<real, Dim>(); } int inDim = THCTensor__nDimension(state, t); if (inDim == Dim) { return toDeviceTensor<real, Dim>(state, t); } // View in which the last dimensions are collapsed or expanded as needed THAssert(THCTensor_isContiguous(state, t)); int size[Dim]; for (int i = 0; i < Dim || i < inDim; ++i) { if (i < Dim && i < inDim) { size[i] = t->size(i); } else if (i < Dim) { size[i] = 1; } else { size[Dim - 1] *= t->size(i); } } return THCDeviceTensor<real, Dim>(t->data<real>(), size); } void THNN_(BatchNormalization_updateOutput)( THCState *state, THCTensor *input_, THCTensor *output_, THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_, bool train, double momentum, double eps) { THCTensor_(resizeAs)(state, output_, input_); if (train) { int64_t nInput = THCTensor_(size)(state, input_, 1); THCTensor_(resize1d)(state, saveMean_, nInput); THCTensor_(resize1d)(state, saveStd_, nInput); } DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_); DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_); DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_); DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_); DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_); DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_); DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_); DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_); hipStream_t s = THCState_getCurrentStream(state); hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state); if (!train) { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s, input, output, runningMean, runningVar, weight, bias, eps); } else { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s, input, output, weight, bias, eps, momentum, runningMean, runningVar, saveMean, saveStd); } THCudaCheck(hipGetLastError()); } void THNN_(BatchNormalization_backward)( THCState *state, THCTensor *input_, THCTensor *gradOutput_, THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) { THCUNN_check_shape(state, input_, gradOutput_); if (gradInput_) { THCTensor_(resizeAs)(state, gradInput_, input_); } DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_); DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_); DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_); DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_); DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_); DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_); DeviceTensor1 runningMean = 
THNN_(devicetensor)<1>(state, runningMean_); DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_); DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_); DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_); hipStream_t s = THCState_getCurrentStream(state); dim3 blocks(gradOutput.getSize(1)); dim3 threads(getNumThreads(gradOutput.getSize(2))); hipLaunchKernelGGL(( BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s, input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, saveMean, saveStd, train, scale, eps); THCudaCheck(hipGetLastError()); } #undef DeviceTensor3 #undef DeviceTensor1 #endif
120f7988fe2ecb9137c6caa31fb9f7398a287b8b.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/BatchNormalization.cu" #else #define DeviceTensor3 THCDeviceTensor<real, 3> #define DeviceTensor1 THCDeviceTensor<real, 1> template <int Dim> static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) { if (!t) { return THCDeviceTensor<real, Dim>(); } int inDim = THCTensor__nDimension(state, t); if (inDim == Dim) { return toDeviceTensor<real, Dim>(state, t); } // View in which the last dimensions are collapsed or expanded as needed THAssert(THCTensor_isContiguous(state, t)); int size[Dim]; for (int i = 0; i < Dim || i < inDim; ++i) { if (i < Dim && i < inDim) { size[i] = t->size(i); } else if (i < Dim) { size[i] = 1; } else { size[Dim - 1] *= t->size(i); } } return THCDeviceTensor<real, Dim>(t->data<real>(), size); } void THNN_(BatchNormalization_updateOutput)( THCState *state, THCTensor *input_, THCTensor *output_, THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_, bool train, double momentum, double eps) { THCTensor_(resizeAs)(state, output_, input_); if (train) { int64_t nInput = THCTensor_(size)(state, input_, 1); THCTensor_(resize1d)(state, saveMean_, nInput); THCTensor_(resize1d)(state, saveStd_, nInput); } DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_); DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_); DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_); DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_); DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_); DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_); DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_); DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_); cudaStream_t s = THCState_getCurrentStream(state); cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state); if (!train) { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>( input, output, runningMean, runningVar, weight, bias, eps); } else { dim3 blocks(input.getSize(1)); dim3 threads(getNumThreads(input.getSize(2))); BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>( input, output, weight, bias, eps, momentum, runningMean, runningVar, saveMean, saveStd); } THCudaCheck(cudaGetLastError()); } void THNN_(BatchNormalization_backward)( THCState *state, THCTensor *input_, THCTensor *gradOutput_, THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) { THCUNN_check_shape(state, input_, gradOutput_); if (gradInput_) { THCTensor_(resizeAs)(state, gradInput_, input_); } DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_); DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_); DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_); DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_); DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_); DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_); DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_); DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_); DeviceTensor1 saveMean = 
THNN_(devicetensor)<1>(state, saveMean_); DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_); cudaStream_t s = THCState_getCurrentStream(state); dim3 blocks(gradOutput.getSize(1)); dim3 threads(getNumThreads(gradOutput.getSize(2))); BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>( input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, saveMean, saveStd, train, scale, eps); THCudaCheck(cudaGetLastError()); } #undef DeviceTensor3 #undef DeviceTensor1 #endif
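The difference between the two BatchNormalization files above is mechanical: hipify adds the hip/hip_runtime.h include and rewrites CUDA's triple-chevron kernel launches as hipLaunchKernelGGL calls, with cudaStream_t and cudaGetLastError mapped to their hip counterparts. A minimal sketch of that rewrite, using a toy scale kernel that is not part of the dataset:

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launchScale(float *d_x, float a, int n, hipStream_t s) {
    dim3 blocks((n + 255) / 256);
    dim3 threads(256);
    // CUDA form, as in the .cu file above:
    //     scale<<<blocks, threads, 0, s>>>(d_x, a, n);
    // HIP form emitted by hipify, as in the .hip file above:
    hipLaunchKernelGGL(scale, blocks, threads, 0, s, d_x, a, n);
}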
4bce74da3098c86549d7e40e5b6bfe1bb4367530.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// vecMaxKernel00.cu : computes a partial answer per thread.
/// By Waruna Ranasinghe
/// Created: 15 Aug 2017
/// Last Modified:
/// The code computes a partial answer for a reduction over max per thread.
/// Each thread computes the max of a consecutive chunk of data of size C.
/// The memory loads are not coalesced.

/*
 * A - input vector of floats of size G*B*C
 * reductions - output of partial answers computed by each thread
 * C - chunk size - number of elements processed by each thread
 */
__global__ void reduce(const float* A, float* reductions, int C) {
    int tid = threadIdx.x;     // Thread index within a thread block
    int blockid = blockIdx.x;  // Block index within the grid
    int B = blockDim.x;        // Number of threads per block

    // The index into the array that corresponds to the start of this thread block
    int start_of_the_block = blockid*B*C;

    reductions[blockid*B + tid] = 0.0f;

    for (int i=start_of_the_block + tid*C; i<start_of_the_block+(tid+1)*C; i++) {
        reductions[blockid*B+tid] = max(reductions[blockid*B+tid], A[i]);
    }
}
4bce74da3098c86549d7e40e5b6bfe1bb4367530.cu
///
/// vecMaxKernel00.cu : computes a partial answer per thread.
/// By Waruna Ranasinghe
/// Created: 15 Aug 2017
/// Last Modified:
/// The code computes a partial answer for a reduction over max per thread.
/// Each thread computes the max of a consecutive chunk of data of size C.
/// The memory loads are not coalesced.

/*
 * A - input vector of floats of size G*B*C
 * reductions - output of partial answers computed by each thread
 * C - chunk size - number of elements processed by each thread
 */
__global__ void reduce(const float* A, float* reductions, int C) {
    int tid = threadIdx.x;     // Thread index within a thread block
    int blockid = blockIdx.x;  // Block index within the grid
    int B = blockDim.x;        // Number of threads per block

    // The index into the array that corresponds to the start of this thread block
    int start_of_the_block = blockid*B*C;

    reductions[blockid*B + tid] = 0.0f;

    for (int i=start_of_the_block + tid*C; i<start_of_the_block+(tid+1)*C; i++) {
        reductions[blockid*B+tid] = max(reductions[blockid*B+tid], A[i]);
    }
}
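The two versions of vecMaxKernel00 above are identical apart from the hipify banner and hip_runtime include; the kernel writes one partial maximum per thread into reductions (G*B values for G blocks of B threads). A hedged sketch of how a host might drive it and finish the reduction on the CPU; the helper name vecMax, the explicit cudaMalloc/cudaMemcpy plumbing, and the std::max_element pass are assumptions for illustration, not taken from the file. Note also that the kernel seeds each partial result with 0.0f, so the scheme implicitly assumes non-negative inputs.

#include <cuda_runtime.h>
#include <algorithm>
#include <vector>

// Defined in the file above: each of the G*B threads writes one partial max.
__global__ void reduce(const float* A, float* reductions, int C);

float vecMax(const float *d_A, int G, int B, int C) {
    float *d_red = nullptr;
    cudaMalloc((void**)&d_red, G * B * sizeof(float));

    // G blocks of B threads; each thread scans its consecutive chunk of C elements.
    reduce<<<G, B>>>(d_A, d_red, C);

    std::vector<float> partial(G * B);
    cudaMemcpy(partial.data(), d_red, G * B * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaFree(d_red);

    // Finish the reduction over the G*B partial answers on the host.
    return *std::max_element(partial.begin(), partial.end());
}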
6ece75ff14ba962e7e72c707c901f8f4127e5eb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FixChargeEwald.h" #include "BoundsGPU.h" #include "cutils_func.h" #include "cutils_math.h" #include "GridGPU.h" #include "State.h" #include <hipfft.h> #include "globalDefs.h" #include <fstream> #include "Virial.h" #include "helpers.h" #include "PairEvaluatorNone.h" #include "EvaluatorWrapper.h" // #include <cmath> using namespace std; namespace py = boost::python; const std::string chargeEwaldType = "ChargeEwald"; // #define THREADS_PER_BLOCK_ // MW: Note that this function is a verbatim copy of that which appears in GridGPU.cu // consider combining __global__ void computeCentroid(float4 *centroids, float4 *xs, int nAtoms, int nPerRingPoly, BoundsGPU bounds) { int idx = GETIDX(); int nRingPoly = nAtoms / nPerRingPoly; if (idx < nRingPoly) { int baseIdx = idx * nPerRingPoly; float3 init = make_float3(xs[baseIdx]); float3 diffSum = make_float3(0, 0, 0); for (int i=baseIdx+1; i<baseIdx + nPerRingPoly; i++) { float3 next = make_float3(xs[i]); float3 dx = bounds.minImage(next - init); diffSum += dx; } diffSum /= nPerRingPoly; float3 unwrappedPos = init + diffSum; float3 trace = bounds.trace(); float3 diffFromLo = unwrappedPos - bounds.lo; float3 imgs = floorf(diffFromLo / trace); //are unskewed at this point float3 wrappedPos = unwrappedPos - trace * imgs * bounds.periodic; centroids[idx] = make_float4(wrappedPos); } } // MW: This is a duplicated function from GridGPU.cu __global__ void periodicWrapCpy(float4 *xs, int nAtoms, BoundsGPU bounds) { int idx = GETIDX(); if (idx < nAtoms) { float4 pos = xs[idx]; float id = pos.w; float3 trace = bounds.trace(); float3 diffFromLo = make_float3(pos) - bounds.lo; float3 imgs = floorf(diffFromLo / trace); //are unskewed at this point pos -= make_float4(trace * imgs * bounds.periodic); pos.w = id; //if (not(pos.x==orig.x and pos.y==orig.y and pos.z==orig.z)) { //sigh if (imgs.x != 0 or imgs.y != 0 or imgs.z != 0) { xs[idx] = pos; } } } //different implementation for different interpolation orders //TODO template //order 1 nearest point __global__ void map_charge_to_grid_order_1_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float *qs, BoundsGPU bounds, int3 sz,float *grid/*convert to float for cufffComplex*/,float Qunit) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; float qi = Qunit*qs[idx * nPerRingPoly]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //or int3 p=nearest_grid_point; if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; atomicAdd(&grid[p.x*sz.y*sz.z*2+p.y*sz.z*2+p.z*2], 1.0*qi); } } inline __host__ __device__ float W_p_3(int i,float x){ if (i==-1) return 0.125-0.5*x+0.5*x*x; if (i== 0) return 0.75-x*x; /*if (i== 1)*/ return 0.125+0.5*x+0.5*x*x; } __global__ void map_charge_to_grid_order_3_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float *qs, BoundsGPU bounds, int3 sz,float *grid/*convert to float for cufffComplex*/,float Qunit) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; float qi = Qunit*qs[idx * nPerRingPoly]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //distance from 
nearest_grid_point /h float3 d=pos/h-make_float3(nearest_grid_point); int3 p=nearest_grid_point; for (int ix=-1;ix<=1;ix++){ p.x=nearest_grid_point.x+ix; float charge_yz_w=qi*W_p_3(ix,d.x); for (int iy=-1;iy<=1;iy++){ p.y=nearest_grid_point.y+iy; float charge_z_w=charge_yz_w*W_p_3(iy,d.y); for (int iz=-1;iz<=1;iz++){ p.z=nearest_grid_point.z+iz; float charge_w=charge_z_w*W_p_3(iz,d.z); if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; if ((p.x<0) or (p.x>sz.x-1)) printf("grid point miss x %d, %d, %d, %f \n", idx,p.x,nearest_grid_point.x,pos.x); if ((p.y<0) or (p.y>sz.y-1)) printf("grid point miss y %d, %d, %d, %f \n", idx,p.y,nearest_grid_point.y,pos.y); if ((p.z<0) or (p.z>sz.z-1)) printf("grid point miss z %d, %d, %d, %f \n", idx,p.z,nearest_grid_point.z,pos.z); atomicAdd(&grid[p.x*sz.y*sz.z*2+p.y*sz.z*2+p.z*2], charge_w); } } } } } __global__ void map_charge_set_to_zero_cu(int3 sz,hipfftComplex *grid) { int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)) grid[id.x*sz.y*sz.z+id.y*sz.z+id.z]=make_cuComplex (0.0f, 0.0f); } __device__ float sinc(float x){ if ((x<0.1)&&(x>-0.1)){ float x2=x*x; return 1.0 - x2*0.16666666667f + x2*x2*0.008333333333333333f - x2*x2*x2*0.00019841269841269841f; } else return sin(x)/x; } __global__ void Green_function_cu(BoundsGPU bounds, int3 sz,float *Green_function,float alpha, //now some parameter for Gf calc int sum_limits, int intrpl_order) { int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ float3 h =bounds.trace()/make_float3(sz); // 2*PI float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; //OK GF(k) = 4Pi/K^2 [SumforM(W(K+M)^2 exp(-(K+M)^2/4alpha) dot(K,K+M)/(K+M^2))] / // [SumforM^2(W(K+M)^2)] float sum1=0.0f; float sum2=0.0f; float k2=lengthSqr(k); float Fouralpha2inv=0.25/alpha/alpha; if (k2!=0.0){ for (int ix=-sum_limits;ix<=sum_limits;ix++){//TODO different limits for (int iy=-sum_limits;iy<=sum_limits;iy++){ for (int iz=-sum_limits;iz<=sum_limits;iz++){ float3 kpM=k+6.28318530717958647693f*make_float3(ix,iy,iz)/h; // kpM.x+=6.28318530717958647693f/h.x*ix;//TODO rewrite // kpM.y+=6.28318530717958647693f/h.y*iy; // kpM.z+=6.28318530717958647693f/h.z*iz; float kpMlen=lengthSqr(kpM); float W=sinc(kpM.x*h.x*0.5)*sinc(kpM.y*h.y*0.5)*sinc(kpM.z*h.z*0.5); // for(int p=1;p<intrpl_order;p++) // W*=W; // W*=h;//not need- cancels out // float W2=W*W; float W2=pow(W,intrpl_order*2); //4*PI sum1+=12.56637061435917295385*exp(-kpMlen*Fouralpha2inv)*dot(k,kpM)/kpMlen*W2; sum2+=W2; } } } Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]=sum1/(sum2*sum2)/k2; }else{ Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]=0.0f; } } } __global__ void potential_cu(int3 sz,float *Green_function, hipfftComplex *FFT_qs, hipfftComplex *FFT_phi){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ 
FFT_phi[id.x*sz.y*sz.z+id.y*sz.z+id.z]=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; //TODO after Inverse FFT divide by volume } } __global__ void E_field_cu(BoundsGPU bounds, int3 sz,float *Green_function, hipfftComplex *FFT_qs, hipfftComplex *FFT_Ex,hipfftComplex *FFT_Ey,hipfftComplex *FFT_Ez){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ //K vector float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; //ik*q(k)*Gf(k) hipfftComplex Ex,Ey,Ez; float GF=Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; hipfftComplex q=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; Ex.y= k.x*q.x*GF; Ex.x=-k.x*q.y*GF; Ey.y= k.y*q.x*GF; Ey.x=-k.y*q.y*GF; Ez.y= k.z*q.x*GF; Ez.x=-k.z*q.y*GF; FFT_Ex[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ex; FFT_Ey[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ey; FFT_Ez[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ez; //TODO after Inverse FFT divide by -volume } } __global__ void Ewald_long_range_forces_order_1_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float4 *fs, float *qs, BoundsGPU bounds, int3 sz, hipfftComplex *FFT_Ex, hipfftComplex *FFT_Ey,hipfftComplex *FFT_Ez,float Qunit, bool storeForces, uint *ids, float4 *storedForces) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole= xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; int baseIdx = idx*nPerRingPoly; float qi = qs[baseIdx]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); int3 p=nearest_grid_point; if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; //get E field float3 E; float volume=bounds.trace().x*bounds.trace().y*bounds.trace().z; E.x= -FFT_Ex[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E.y= -FFT_Ey[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E.z= -FFT_Ez[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; // Apply force on centroid to all time slices for given atom float3 force= Qunit*qi*E; for (int i = 0; i< nPerRingPoly; i++) { fs[baseIdx + i] += force; } if (storeForces) { for (int i = 0; i < nPerRingPoly; i++) { storedForces[ids[baseIdx+i]] = make_float4(force.x, force.y, force.z, 0); } } } } __global__ void Ewald_long_range_forces_order_3_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float4 *fs, float *qs, BoundsGPU bounds, int3 sz, hipfftComplex *FFT_Ex, hipfftComplex *FFT_Ey,hipfftComplex *FFT_Ez,float Qunit, bool storeForces, uint *ids, float4 *storedForces) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole= xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; int baseIdx = idx*nPerRingPoly; float qi = qs[baseIdx]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //distance from nearest_grid_point /h float3 d=pos/h-make_float3(nearest_grid_point); float3 E=make_float3(0,0,0); float volume=bounds.trace().x*bounds.trace().y*bounds.trace().z; int3 p=nearest_grid_point; for (int ix=-1;ix<=1;ix++){ p.x=nearest_grid_point.x+ix; for (int iy=-1;iy<=1;iy++){ p.y=nearest_grid_point.y+iy; for (int iz=-1;iz<=1;iz++){ 
p.z=nearest_grid_point.z+iz; if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; float3 Ep; float W_xyz=W_p_3(ix,d.x)*W_p_3(iy,d.y)*W_p_3(iz,d.z); Ep.x= -FFT_Ex[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; Ep.y= -FFT_Ey[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; Ep.z= -FFT_Ez[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E+=W_xyz*Ep; } } } float3 force= Qunit*qi*E; // Apply force on centroid to all time slices for given atom for (int i = 0; i < nPerRingPoly; i++) { fs[baseIdx + i] += force; } if (storeForces) { for (int i = 0; i < nPerRingPoly; i++) { storedForces[ids[baseIdx+i]] = make_float4(force.x, force.y, force.z, 0); } } } } __global__ void Energy_cu(int3 sz,float *Green_function, hipfftComplex *FFT_qs, hipfftComplex *E_grid){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ hipfftComplex qi=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; E_grid[id.x*sz.y*sz.z+id.y*sz.z+id.z] =make_cuComplex((qi.x*qi.x+qi.y*qi.y)*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z],0.0); //TODO after Inverse FFT divide by volume } } __global__ void virials_cu(BoundsGPU bounds,int3 sz,Virial *dest,float alpha, float *Green_function,hipfftComplex *FFT_qs,int warpSize){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; float klen=lengthSqr(k); hipfftComplex qi=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; float E=(qi.x*qi.x+qi.y*qi.y)*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; float differential=-2.0*(1.0/klen+0.25/(alpha*alpha)); if (klen==0.0) {differential=0.0;E=0.0;} Virial virialstmp = Virial(0, 0, 0, 0, 0, 0); virialstmp[0]=(1.0+differential*k.x*k.x)*E; //xx virialstmp[1]=(1.0+differential*k.y*k.y)*E; //yy virialstmp[2]=(1.0+differential*k.z*k.z)*E; //zz virialstmp[3]=(differential*k.x*k.y)*E; //xy virialstmp[4]=(differential*k.x*k.z)*E; //xz virialstmp[5]=(differential*k.y*k.z)*E; //yz // virials[id.x*sz.y*sz.z+id.y*sz.z+id.z]=virialstmp; // __syncthreads(); extern __shared__ Virial tmpV[]; // const int copyBaseIdx = blockDim.x*blockIdx.x * N_DATA_PER_THREAD + threadIdx.x; // const int copyIncrement = blockDim.x; tmpV[threadIdx.x*blockDim.y*blockDim.z+threadIdx.y*blockDim.z+threadIdx.z]=virialstmp; int curLookahead=1; int numLookaheadSteps = log2f(blockDim.x*blockDim.y*blockDim.z-1); const int sumBaseIdx = threadIdx.x*blockDim.y*blockDim.z+threadIdx.y*blockDim.z+threadIdx.z; __syncthreads(); for (int i=0; i<=numLookaheadSteps; i++) { if (! 
(sumBaseIdx % (curLookahead*2))) { tmpV[sumBaseIdx] += tmpV[sumBaseIdx + curLookahead]; } curLookahead *= 2; // if (curLookahead >= (warpSize)) {//Doesn't work in 3D case __syncthreads(); // } } if (sumBaseIdx == 0) { atomicAdd(&(dest[0].vals[0]), tmpV[0][0]); atomicAdd(&(dest[0].vals[1]), tmpV[0][1]); atomicAdd(&(dest[0].vals[2]), tmpV[0][2]); atomicAdd(&(dest[0].vals[3]), tmpV[0][3]); atomicAdd(&(dest[0].vals[4]), tmpV[0][4]); atomicAdd(&(dest[0].vals[5]), tmpV[0][5]); } } } #define N_DATA_PER_THREAD 4 //just taken from cutils_func.h __global__ void sum_virials_cu(Virial *dest, Virial *src, int n, int warpSize){ extern __shared__ Virial tmpV[]; const int copyBaseIdx = blockDim.x*blockIdx.x * N_DATA_PER_THREAD + threadIdx.x; const int copyIncrement = blockDim.x; for (int i=0; i<N_DATA_PER_THREAD; i++) { int step = i * copyIncrement; if (copyBaseIdx + step < n) { tmpV[threadIdx.x + step] = src[copyBaseIdx + step]; } else { tmpV[threadIdx.x + step] =Virial(0, 0, 0, 0, 0, 0); } } int curLookahead = N_DATA_PER_THREAD; int numLookaheadSteps = log2f(blockDim.x-1); const int sumBaseIdx = threadIdx.x * N_DATA_PER_THREAD; __syncthreads(); for (int i=sumBaseIdx+1; i<sumBaseIdx + N_DATA_PER_THREAD; i++) { tmpV[sumBaseIdx] += tmpV[i]; } for (int i=0; i<=numLookaheadSteps; i++) { if (! (sumBaseIdx % (curLookahead*2))) { tmpV[sumBaseIdx] += tmpV[sumBaseIdx + curLookahead]; } curLookahead *= 2; if (curLookahead >= (N_DATA_PER_THREAD * warpSize)) { __syncthreads(); } } if (threadIdx.x == 0) { atomicAdd(&(dest[0].vals[0]), tmpV[0][0]); atomicAdd(&(dest[0].vals[1]), tmpV[0][1]); atomicAdd(&(dest[0].vals[2]), tmpV[0][2]); atomicAdd(&(dest[0].vals[3]), tmpV[0][3]); atomicAdd(&(dest[0].vals[4]), tmpV[0][4]); atomicAdd(&(dest[0].vals[5]), tmpV[0][5]); } } /* template < bool COMPUTE_VIRIALS> __global__ void compute_short_range_forces_cu(int nAtoms, float4 *xs, float4 *fs, uint16_t *neighborCounts, uint *neighborlist, uint32_t *cumulSumMaxPerBlock, float *qs, float alpha, float rCut, BoundsGPU bounds, int warpSize, float onetwoStr, float onethreeStr, float onefourStr, Virial *__restrict__ virials, Virial *virialField, float volume,float conversion) { float multipliers[4] = {1, onetwoStr, onethreeStr, onefourStr}; // printf("USING SHORT RANGE FORCES IN VIRIAL. THIS KERNEL IS INCORRECT\n"); Virial virialsSum = Virial(0, 0, 0, 0, 0, 0); int idx = GETIDX(); if (idx < nAtoms) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole); float3 forceSum = make_float3(0, 0, 0); float qi = qs[idx]; int baseIdx = baseNeighlistIdx(cumulSumMaxPerBlock, warpSize); int numNeigh = neighborCounts[idx]; for (int i=0; i<numNeigh; i++) { int nlistIdx = baseIdx + warpSize * i; uint otherIdxRaw = neighborlist[nlistIdx]; uint neighDist = otherIdxRaw >> 30; uint otherIdx = otherIdxRaw & EXCL_MASK; float3 otherPos = make_float3(xs[otherIdx]); //then wrap and compute forces! 
float3 dr = bounds.minImage(pos - otherPos); float lenSqr = lengthSqr(dr); // printf("dist is %f %f %f\n", dr.x, dr.y, dr.z); if (lenSqr < rCut*rCut) { float multiplier = multipliers[neighDist]; if (multiplier) { float len=sqrtf(lenSqr); float qj = qs[otherIdx]; float r2inv = 1.0f/lenSqr; float rinv = 1.0f/len; //1/Sqrt(Pi) float forceScalar = conversion*qi*qj*(erfcf((alpha*len))*rinv+(2.0*0.5641895835477563*alpha)*exp(-alpha*alpha*lenSqr))*r2inv* multiplier; float3 forceVec = dr * forceScalar; forceSum += forceVec; // if ((::isnan(forceScalar)) or (abs(forceScalar)>1E6)) printf("short ewald nan %f ,%d ,%d %f \n", forceScalar,idx, otherIdx,pos.x); if (COMPUTE_VIRIALS) { computeVirial(virialsSum, forceVec, dr); } } } } fs[idx] += forceSum; //operator for float4 + float3 if (COMPUTE_VIRIALS) { //printf("vir %f %f %f %f %f %f\n", virialsSum.vals[0], virialsSum.vals[1], virialsSum.vals[2], virial_per_particle.vals[0],virial_per_particle.vals[1],virial_per_particle.vals[2]); Virial field = virialField[0]; field /= (nAtoms * volume); virialsSum+=field; virials[idx] += virialsSum; } } } */ /* __global__ void compute_short_range_energies_cu(int nAtoms, float4 *xs, uint16_t *neighborCounts, uint *neighborlist, uint32_t *cumulSumMaxPerBlock, float *qs, float alpha, float rCut, BoundsGPU bounds, int warpSize, float onetwoStr, float onethreeStr, float onefourStr,float *perParticleEng, float field_energy_per_particle,float conversion) { float multipliers[4] = {1, onetwoStr, onethreeStr, onefourStr}; int idx = GETIDX(); if (idx < nAtoms) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole); float EngSum = 0.0f; float qi = qs[idx]; int baseIdx = baseNeighlistIdx(cumulSumMaxPerBlock, warpSize); int numNeigh = neighborCounts[idx]; for (int i=0; i<numNeigh; i++) { int nlistIdx = baseIdx + warpSize * i; uint otherIdxRaw = neighborlist[nlistIdx]; uint neighDist = otherIdxRaw >> 30; uint otherIdx = otherIdxRaw & EXCL_MASK; float3 otherPos = make_float3(xs[otherIdx]); //then wrap and compute forces! float3 dr = bounds.minImage(pos - otherPos); float lenSqr = lengthSqr(dr); // printf("dist is %f %f %f\n", dr.x, dr.y, dr.z); if (lenSqr < rCut*rCut) { float multiplier = multipliers[neighDist]; if (multiplier) { float len=sqrtf(lenSqr); float qj = qs[otherIdx]; // float r2inv = 1.0f/lenSqr; float rinv = 1.0f/len; float eng = conversion*0.5*qi*qj*(erfcf((alpha*len))*rinv)*multiplier; EngSum += eng; } } } perParticleEng[idx] += EngSum+field_energy_per_particle; } } */ __global__ void applyStoredForces(int nAtoms, float4 *fs, uint *ids, float4 *fsStored) { int idx = GETIDX(); if (idx < nAtoms) { float4 cur = fs[idx]; float3 stored = make_float3(fsStored[ids[idx]]); cur += stored; fs[idx] = cur; } } __global__ void mapVirialToSingleAtom(Virial *atomVirials, Virial *fieldVirial, float volume) { //just mapping to one atom for now. 
If we're looking at per-atom properties, should change to mapping to all atoms evenly atomVirials[0][threadIdx.x] += 0.5 * fieldVirial[0][threadIdx.x] / volume; } __global__ void mapEngToParticles(int nAtoms, float eng, float *engs) { int idx = GETIDX(); if (idx < nAtoms) { engs[idx] += eng; } } FixChargeEwald::FixChargeEwald(SHARED(State) state_, string handle_, string groupHandle_): FixCharge(state_, handle_, groupHandle_, chargeEwaldType, true){ hipfftCreate(&plan); canOffloadChargePairCalc = true; modeIsError = false; sz = make_int3(32, 32, 32); malloced = false; longRangeInterval = 1; setEvalWrapper(); } FixChargeEwald::~FixChargeEwald(){ hipfftDestroy(plan); if (malloced) { hipFree(FFT_Qs); hipFree(FFT_Ex); hipFree(FFT_Ey); hipFree(FFT_Ez); } } //Root mean square force error estimation const double amp_table[][7] = { {2.0/3.0, 0, 0, 0, 0, 0, 0}, {1.0/50.0, 5.0/294.0, 0, 0, 0, 0, 0}, {1.0/588.0, 7.0/1440.0, 21.0/3872.0, 0, 0, 0, 0}, {1.0/4320.0, 3.0/1936.0, 7601.0/2271360.0, 143.0/28800.0, 0, 0, 0}, {1.0/23232.0, 7601.0/12628160.0, 143.0/69120.0, 517231.0/106536960.0, 106640677.0/11737571328.0, 0, 0}, {691.0/68140800.0, 13.0/57600.0, 47021.0/35512320.0, 9694607.0/2095994880.0, 733191589.0/59609088000.0, 326190917.0/11700633600.0, 0}, {1.0/345600.0, 3617.0/35512320.0, 745739.0/838397952.0, 56399353.0/12773376000.0, 25091609.0/1560084480.0, 1755948832039.0/36229939200000.0, 48887769399.0/37838389248.0} }; double FixChargeEwald :: DeltaF_k(double t_alpha){ int nAtoms = state->atoms.size(); double sumx=0.0,sumy=0.0,sumz=0.0; for( int m=0;m<interpolation_order;m++){ double amp=amp_table[interpolation_order-1][m]; sumx+=amp*pow(h.x*t_alpha,2*m); sumy+=amp*pow(h.y*t_alpha,2*m); sumz+=amp*pow(h.z*t_alpha,2*m); } return total_Q2/3.0*(1.0/(L.x*L.x)*pow(t_alpha*h.x,interpolation_order)*sqrt(t_alpha*L.x/nAtoms*sqrt(2.0*M_PI)*sumx)+ 1.0/(L.y*L.y)*pow(t_alpha*h.y,interpolation_order)*sqrt(t_alpha*L.y/nAtoms*sqrt(2.0*M_PI)*sumy)+ 1.0/(L.z*L.z)*pow(t_alpha*h.z,interpolation_order)*sqrt(t_alpha*L.z/nAtoms*sqrt(2.0*M_PI)*sumz)); } double FixChargeEwald :: DeltaF_real(double t_alpha){ int nAtoms = state->atoms.size(); return 2*total_Q2/sqrt(nAtoms*r_cut*L.x*L.y*L.z)*exp(-t_alpha*t_alpha*r_cut*r_cut); } void FixChargeEwald::setTotalQ2() { int nAtoms = state->atoms.size(); GPUArrayGlobal<float>tmp(1); tmp.memsetByVal(0.0); float conversion = state->units.qqr_to_eng; hipLaunchKernelGGL(( accumulate_gpu<float,float, SumSqr, N_DATA_PER_THREAD>) , dim3(NBLOCK(nAtoms/(double)N_DATA_PER_THREAD)),dim3(PERBLOCK),N_DATA_PER_THREAD*sizeof(float)*PERBLOCK, 0, tmp.getDevData(), state->gpd.qs(state->gpd.activeIdx()), nAtoms, state->devManager.prop.warpSize, SumSqr()); tmp.dataToHost(); total_Q2=conversion*tmp.h_data[0]/state->nPerRingPoly; tmp.memsetByVal(0.0); hipLaunchKernelGGL(( accumulate_gpu<float,float, SumSingle, N_DATA_PER_THREAD>) , dim3(NBLOCK(nAtoms/(double)N_DATA_PER_THREAD)),dim3(PERBLOCK),N_DATA_PER_THREAD*sizeof(float)*PERBLOCK, 0, tmp.getDevData(), state->gpd.qs(state->gpd.activeIdx()), nAtoms, state->devManager.prop.warpSize, SumSingle()); tmp.dataToHost(); total_Q=sqrt(conversion)*tmp.h_data[0]/state->nPerRingPoly; cout<<"total_Q "<<total_Q<<'\n'; cout<<"total_Q2 "<<total_Q2<<'\n'; } double FixChargeEwald::find_optimal_parameters(bool printError){ int nAtoms = state->atoms.size(); L=state->boundsGPU.trace(); h=make_float3(L.x/sz.x,L.y/sz.y,L.z/sz.z); // cout<<"Lx "<<L.x<<'\n'; // cout<<"hx "<<h.x<<'\n'; // cout<<"nA "<<nAtoms<<'\n'; //now root solver //solving DeltaF_k=DeltaF_real // 
Log(DeltaF_k)=Log(DeltaF_real) // Log(DeltaF_k)-Log(DeltaF_real)=0 //lets try secant //two initial points double x_a=0.0; double x_b=4.79853/r_cut; double y_a=DeltaF_k(x_a)-DeltaF_real(x_a); double y_b=DeltaF_k(x_b)-DeltaF_real(x_b); // cout<<x_a<<' '<<y_a<<'\n'; // cout<<x_b<<' '<<y_b<<' '<<DeltaF_real(x_b)<<'\n'; double tol=1E-5; int n_iter=0,max_iter=100; while((fabs(y_b)/DeltaF_real(x_b)>tol)&&(n_iter<max_iter)){ double kinv=(x_b-x_a)/(y_b-y_a); y_a=y_b; x_a=x_b; x_b=x_a-y_a*kinv; y_b=DeltaF_k(x_b)-DeltaF_real(x_b); // cout<<x_b<<' '<<y_b<<'\n'; n_iter++; } if (n_iter==max_iter) cout<<"Ewald RMS Root finder failed, max_iter "<<max_iter<<" reached\n"; alpha=x_b; setEvalWrapper(); //set orig! //alpha = 1.0; double error = DeltaF_k(alpha)+DeltaF_real(alpha); if (printError) { cout<<"Ewald alpha="<<alpha<<'\n'; cout<<"Ewald RMS error is "<<error<<'\n'; } return error; } void FixChargeEwald::setParameters(int szx_,int szy_,int szz_,float rcut_,int interpolation_order_) { //for now support for only 2^N sizes //TODO generalize for non cubic boxes if (rcut_==-1) { rcut_ = state->rCut; } if ((szx_!=32)&&(szx_!=64)&&(szx_!=128)&&(szx_!=256)&&(szx_!=512)&&(szx_!=1024)){ cout << szx_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } if ((szy_!=32)&&(szy_!=64)&&(szy_!=128)&&(szy_!=256)&&(szy_!=512)&&(szy_!=1024)){ cout << szy_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } if ((szz_!=32)&&(szz_!=64)&&(szz_!=128)&&(szz_!=256)&&(szz_!=512)&&(szz_!=1024)){ cout << szz_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } sz=make_int3(szx_,szy_,szz_); r_cut=rcut_; hipMalloc((void**)&FFT_Qs, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipfftPlan3d(&plan, sz.x,sz.y, sz.z, HIPFFT_C2C); hipMalloc((void**)&FFT_Ex, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipMalloc((void**)&FFT_Ey, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipMalloc((void**)&FFT_Ez, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); Green_function=GPUArrayGlobal<float>(sz.x*sz.y*sz.z); CUT_CHECK_ERROR("setParameters execution failed"); interpolation_order=interpolation_order_; malloced = true; } void FixChargeEwald::setGridToErrorTolerance(bool printMsg) { int3 szOld = sz; int nTries = 0; double error = find_optimal_parameters(false); Vector trace = state->bounds.rectComponents; while (nTries < 100 and (error > errorTolerance or error!=error or error < 0)) { //<0 tests for -inf Vector sVec = Vector(make_float3(sz)); Vector ratio = sVec / trace; double minRatio = ratio[0]; int minIdx = 0; for (int i=0; i<3; i++) { if (ratio[i] < minRatio) { minRatio = ratio[i]; minIdx = i; } } sVec[minIdx] *= 2; //sz *= 2;//make_int3(sVec.asFloat3()); sz = make_int3(sVec.asFloat3()); error = find_optimal_parameters(false); nTries++; } //DOESN'T REDUCE GRID SIZE EVER if (printMsg) { printf("Using ewald grid of %d %d %d with error %f\n", sz.x, sz.y, sz.z, error); } if (!malloced or szOld != sz) { if (malloced) { hipfftDestroy(plan); hipFree(FFT_Qs); hipFree(FFT_Ex); hipFree(FFT_Ey); hipFree(FFT_Ez); } hipMalloc((void**)&FFT_Qs, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipfftPlan3d(&plan, sz.x,sz.y, sz.z, HIPFFT_C2C); hipMalloc((void**)&FFT_Ex, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipMalloc((void**)&FFT_Ey, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); hipMalloc((void**)&FFT_Ez, sizeof(hipfftComplex)*sz.x*sz.y*sz.z); Green_function=GPUArrayGlobal<float>(sz.x*sz.y*sz.z); malloced = true; } } void FixChargeEwald::setError(double targetError, float rcut_, int interpolation_order_) { 
if (rcut_==-1) { rcut_ = state->rCut; } r_cut=rcut_; interpolation_order=interpolation_order_; errorTolerance = targetError; modeIsError = true; } void FixChargeEwald::calc_Green_function(){ dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); int sum_limits=int(alpha*pow(h.x*h.y*h.z,1.0/3.0)/3.14159*(sqrt(-log(10E-7))))+1; hipLaunchKernelGGL(( Green_function_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, state->boundsGPU, sz,Green_function.getDevData(),alpha, sum_limits,interpolation_order);//TODO parameters unknown CUT_CHECK_ERROR("Green_function_cu kernel execution failed"); //test area // Green_function.dataToHost(); // ofstream ofs; // ofs.open("test_Green_function.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<Green_function.h_data[i*sz.y*sz.z+j*sz.z+k]<<'\t'; // ofs<<Green_function.h_data[i*sz.y*sz.z+j*sz.z+k]<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } // ofs.close(); } void FixChargeEwald::calc_potential(hipfftComplex *phi_buf){ BoundsGPU b=state->boundsGPU; float volume=b.volume(); dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); hipLaunchKernelGGL(( potential_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, sz,Green_function.getDevData(), FFT_Qs,phi_buf); CUT_CHECK_ERROR("potential_cu kernel execution failed"); hipfftExecC2C(plan, phi_buf, phi_buf, HIPFFT_BACKWARD); hipDeviceSynchronize(); CUT_CHECK_ERROR("hipfftExecC2C execution failed"); // //test area // float *buf=new float[sz.x*sz.y*sz.z*2]; // hipMemcpy((void *)buf,phi_buf,sizeof(hipfftComplex)*sz.x*sz.y*sz.z,hipMemcpyDeviceToHost ); // ofstream ofs; // ofs.open("test_phi.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; // ofs<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } // ofs.close(); // delete []buf; } bool FixChargeEwald::prepareForRun() { virialField = GPUArrayDeviceGlobal<Virial>(1); setTotalQ2(); if ((state->boundsGPU != boundsLastOptimize)||(total_Q2!=total_Q2LastOptimize)) { handleBoundsChangeInternal(true); } turnInit = state->turn; if (longRangeInterval != 1) { storedForces = GPUArrayDeviceGlobal<float4>(state->maxIdExisting+1); } else { storedForces = GPUArrayDeviceGlobal<float4>(1); } if (state->nPerRingPoly > 1) { rpCentroids = GPUArrayDeviceGlobal<float4>(state->atoms.size() / state->nPerRingPoly); } setEvalWrapper(); return true; } void FixChargeEwald::setEvalWrapper() { if (evalWrapperMode == "offload") { if (hasOffloadedChargePairCalc) { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), nullptr); //nParams arg is 1 rather than zero b/c can't have zero sized argument on device } else { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), this); } } else if (evalWrapperMode == "self") { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), this); } } void FixChargeEwald::handleBoundsChange() { handleBoundsChangeInternal(false); } void FixChargeEwald::handleBoundsChangeInternal(bool printError) { if ((state->boundsGPU != boundsLastOptimize)||(total_Q2!=total_Q2LastOptimize)) { if (modeIsError) { setGridToErrorTolerance(printError); } else { find_optimal_parameters(printError); } calc_Green_function(); boundsLastOptimize = state->boundsGPU; total_Q2LastOptimize=total_Q2; } } void 
FixChargeEwald::compute(int virialMode) { // CUT_CHECK_ERROR("before FixChargeEwald kernel execution failed"); // cout<<"FixChargeEwald::compute..\n"; int nAtoms = state->atoms.size(); int nPerRingPoly = state->nPerRingPoly; int nRingPoly = nAtoms / nPerRingPoly; GPUData &gpd = state->gpd; GridGPU &grid = state->gridGPU; int activeIdx = gpd.activeIdx(); uint16_t *neighborCounts = grid.perAtomArray.d_data.data(); float Qconversion = sqrt(state->units.qqr_to_eng); //first update grid from atoms positions //set qs to 0 dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); if (not ((state->turn - turnInit) % longRangeInterval)) { hipLaunchKernelGGL(( map_charge_set_to_zero_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, sz,FFT_Qs); // CUT_CHECK_ERROR("map_charge_set_to_zero_cu kernel execution failed"); //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Compute centroids of all ring polymers for use on grid //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ float4 *centroids; BoundsGPU bounds = state->boundsGPU; BoundsGPU boundsUnskewed = bounds.unskewed(); if (nPerRingPoly >1) { hipLaunchKernelGGL(( computeCentroid), dim3(NBLOCK(nRingPoly)),dim3(PERBLOCK), 0, 0, rpCentroids.data(),gpd.xs(activeIdx),nAtoms,nPerRingPoly,boundsUnskewed); centroids = rpCentroids.data(); } else { centroids = gpd.xs(activeIdx); } switch (interpolation_order){ case 1:hipLaunchKernelGGL(({map_charge_to_grid_order_1_cu) , dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs, Qconversion); break;} case 3:hipLaunchKernelGGL(({map_charge_to_grid_order_3_cu) , dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs, Qconversion); break;} } // CUT_CHECK_ERROR("map_charge_to_grid_cu kernel execution failed"); hipfftExecC2C(plan, FFT_Qs, FFT_Qs, HIPFFT_FORWARD); // hipDeviceSynchronize(); // CUT_CHECK_ERROR("hipfftExecC2C Qs execution failed"); // //test area // float buf[sz.x*sz.y*sz.z*2]; // hipMemcpy(buf,FFT_Qs,sizeof(hipfftComplex)*sz.x*sz.y*sz.z,hipMemcpyDeviceToHost ); // ofstream ofs; // ofs.open("test_FFT.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]<<'\t'; // ofs <<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } //next potential calculation: just going to use Ex to store it for now // calc_potential(FFT_Ex); //calc E field hipLaunchKernelGGL(( E_field_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, state->boundsGPU,sz,Green_function.getDevData(), FFT_Qs,FFT_Ex,FFT_Ey,FFT_Ez); CUT_CHECK_ERROR("E_field_cu kernel execution failed"); hipfftExecC2C(plan, FFT_Ex, FFT_Ex, HIPFFT_BACKWARD); hipfftExecC2C(plan, FFT_Ey, FFT_Ey, HIPFFT_BACKWARD); hipfftExecC2C(plan, FFT_Ez, FFT_Ez, HIPFFT_BACKWARD); // hipDeviceSynchronize(); // CUT_CHECK_ERROR("hipfftExecC2C E_field execution failed"); /*//test area Bounds b=state->bounds; float volume=b.trace[0]*b.trace[1]*b.trace[2]; float *buf=new float[sz.x*sz.y*sz.z*2]; hipMemcpy((void *)buf,FFT_Ex,sizeof(hipfftComplex)*sz.x*sz.y*sz.z,hipMemcpyDeviceToHost ); ofstream ofs; ofs.open("test_Ex.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; 
cout<<'\n'; } ofs.close(); hipMemcpy((void *)buf,FFT_Ey,sizeof(hipfftComplex)*sz.x*sz.y*sz.z,hipMemcpyDeviceToHost ); ofs.open("test_Ey.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; cout<<'\n'; } ofs.close(); hipMemcpy((void *)buf,FFT_Ez,sizeof(hipfftComplex)*sz.x*sz.y*sz.z,hipMemcpyDeviceToHost ); ofs.open("test_Ez.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; cout<<'\n'; } ofs.close(); delete []buf; */ //calc forces //printf("Forces!\n"); // Performing an "effective" ring polymer contraction means that we should evaluate the forces // for the centroids bool storeForces = longRangeInterval != 1; switch (interpolation_order){ case 1:hipLaunchKernelGGL(({Ewald_long_range_forces_order_1_cu), dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.fs(activeIdx), gpd.qs(activeIdx), state->boundsGPU, sz, FFT_Ex,FFT_Ey,FFT_Ez,Qconversion, storeForces, gpd.ids(activeIdx), storedForces.data() ); break;} case 3:hipLaunchKernelGGL(({Ewald_long_range_forces_order_3_cu), dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.fs(activeIdx), gpd.qs(activeIdx), state->boundsGPU, sz, FFT_Ex,FFT_Ey,FFT_Ez,Qconversion, storeForces, gpd.ids(activeIdx), storedForces.data() ); break;} } } else { hipLaunchKernelGGL(( applyStoredForces), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), 0, 0, nAtoms, gpd.fs(activeIdx), gpd.ids(activeIdx), storedForces.data()); } CUT_CHECK_ERROR("Ewald_long_range_forces_cu execution failed"); //SHORT RANGE if (virialMode) { int warpSize = state->devManager.prop.warpSize; BoundsGPU &b=state->boundsGPU; float volume=b.volume(); virialField.memset(0); hipLaunchKernelGGL(( virials_cu), dim3(dimGrid), dim3(dimBlock),sizeof(Virial)*dimBlock.x*dimBlock.y*dimBlock.z, 0, state->boundsGPU,sz,virialField.data(),alpha,Green_function.getDevData(), FFT_Qs, warpSize); CUT_CHECK_ERROR("virials_cu kernel execution failed"); hipLaunchKernelGGL(( mapVirialToSingleAtom), dim3(1), dim3(6), 0, 0, gpd.virials.d_data.data(), virialField.data(), volume); } float *neighborCoefs = state->specialNeighborCoefs; evalWrap->compute(nAtoms,nPerRingPoly,gpd.xs(activeIdx), gpd.fs(activeIdx), neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(), state->devManager.prop.warpSize, nullptr, 0, state->boundsGPU, //PASSING NULLPTR TO GPU MAY CAUSE ISSUES //ALTERNATIVELy, COULD JUST GIVE THE PARMS SOME OTHER RANDOM POINTER, AS LONG AS IT'S VALID neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], gpd.virials.d_data.data(), gpd.qs(activeIdx), r_cut, virialMode, nThreadPerBlock(), nThreadPerAtom()); CUT_CHECK_ERROR("Ewald_short_range_forces_cu execution failed"); } void FixChargeEwald::singlePointEng(float * perParticleEng) { CUT_CHECK_ERROR("before FixChargeEwald kernel execution failed"); if (state->boundsGPU != boundsLastOptimize) { handleBoundsChange(); } // cout<<"FixChargeEwald::compute..\n"; int nAtoms = state->atoms.size(); int nPerRingPoly = state->nPerRingPoly; int nRingPoly = nAtoms / nPerRingPoly; GPUData &gpd = state->gpd; GridGPU &grid = state->gridGPU; int activeIdx = gpd.activeIdx(); uint16_t *neighborCounts = grid.perAtomArray.d_data.data(); float Qconversion = sqrt(state->units.qqr_to_eng); //first update grid 
from atoms positions //set qs to 0 float field_energy_per_particle = 0; dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); hipLaunchKernelGGL(( map_charge_set_to_zero_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, sz,FFT_Qs); CUT_CHECK_ERROR("map_charge_set_to_zero_cu kernel execution failed"); //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Compute centroids of all ring polymers for use on grid //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ float4 *centroids; BoundsGPU bounds = state->boundsGPU; BoundsGPU boundsUnskewed = bounds.unskewed(); if (nPerRingPoly >1) { rpCentroids = GPUArrayDeviceGlobal<float4>(nRingPoly); hipLaunchKernelGGL(( computeCentroid), dim3(NBLOCK(nRingPoly)),dim3(PERBLOCK), 0, 0, rpCentroids.data(),gpd.xs(activeIdx),nAtoms,nPerRingPoly,boundsUnskewed); centroids = rpCentroids.data(); hipLaunchKernelGGL(( periodicWrapCpy), dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, centroids, nRingPoly, boundsUnskewed); } else { centroids = gpd.xs(activeIdx); } switch (interpolation_order){ case 1:hipLaunchKernelGGL(({map_charge_to_grid_order_1_cu) , dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs,Qconversion); break;} case 3:hipLaunchKernelGGL(({map_charge_to_grid_order_3_cu) , dim3(NBLOCK(nRingPoly)), dim3(PERBLOCK), 0, 0, nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs,Qconversion); break;} } CUT_CHECK_ERROR("map_charge_to_grid_cu kernel execution failed"); hipfftExecC2C(plan, FFT_Qs, FFT_Qs, HIPFFT_FORWARD); hipDeviceSynchronize(); CUT_CHECK_ERROR("hipfftExecC2C Qs execution failed"); //calc field energy BoundsGPU &b=state->boundsGPU; float volume=b.volume(); hipLaunchKernelGGL(( Energy_cu), dim3(dimGrid), dim3(dimBlock), 0, 0, sz,Green_function.getDevData(), FFT_Qs,FFT_Ex);//use Ex as buffer CUT_CHECK_ERROR("Energy_cu kernel execution failed"); GPUArrayGlobal<float>field_E(1); field_E.memsetByVal(0.0); int warpSize = state->devManager.prop.warpSize; hipLaunchKernelGGL(( accumulate_gpu<float,float, SumSingle, N_DATA_PER_THREAD>) , dim3(NBLOCK(2*sz.x*sz.y*sz.z/(double)N_DATA_PER_THREAD)),dim3(PERBLOCK),N_DATA_PER_THREAD*sizeof(float)*PERBLOCK, 0, field_E.getDevData(), (float *)FFT_Ex, 2*sz.x*sz.y*sz.z, warpSize, SumSingle() ); /* sumSingle<float,float, N_DATA_PER_THREAD> <<<NBLOCK(2*sz.x*sz.y*sz.z/(double)N_DATA_PER_THREAD),PERBLOCK,N_DATA_PER_THREAD*sizeof(float)*PERBLOCK>>>( field_E.getDevData(), (float *)FFT_Ex, 2*sz.x*sz.y*sz.z, warpSize); */ field_E.dataToHost(); //field_energy_per_particle=0.5*field_E.h_data[0]/volume/nAtoms; field_energy_per_particle=0.5*field_E.h_data[0]/volume/nRingPoly; // cout<<"field_E "<<field_E.h_data[0]<<'\n'; field_energy_per_particle-=alpha/sqrt(M_PI)*total_Q2/nRingPoly; // cout<<"self correction "<<alpha/sqrt(M_PI)*total_Q2<<'\n'; //pair energies hipLaunchKernelGGL(( mapEngToParticles), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), 0, 0, nAtoms, field_energy_per_particle, perParticleEng); float *neighborCoefs = state->specialNeighborCoefs; evalWrap->energy(nAtoms,nPerRingPoly, gpd.xs(activeIdx), perParticleEng, neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(), state->devManager.prop.warpSize, nullptr, 0, state->boundsGPU, neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], gpd.qs(activeIdx), r_cut, nThreadPerBlock(), nThreadPerAtom()); 
CUT_CHECK_ERROR("Ewald_short_range_forces_cu execution failed"); } int FixChargeEwald::setLongRangeInterval(int interval) { if (interval) { longRangeInterval = interval; } return longRangeInterval; } ChargeEvaluatorEwald FixChargeEwald::generateEvaluator() { return ChargeEvaluatorEwald(alpha, state->units.qqr_to_eng); } void (FixChargeEwald::*setParameters_xyz)(int ,int ,int ,float ,int) = &FixChargeEwald::setParameters; void (FixChargeEwald::*setParameters_xxx)(int ,float ,int) = &FixChargeEwald::setParameters; void export_FixChargeEwald() { py::class_<FixChargeEwald, SHARED(FixChargeEwald), py::bases<FixCharge> > ( "FixChargeEwald", py::init<SHARED(State), string, string> ( py::args("state", "handle", "groupHandle")) ) .def("setParameters", setParameters_xyz, (py::arg("szx"),py::arg("szy"),py::arg("szz"), py::arg("r_cut")=-1,py::arg("interpolation_order")=3) ) .def("setParameters", setParameters_xxx, (py::arg("sz"),py::arg("r_cut")=-1,py::arg("interpolation_order")=3) ) .def("setError", &FixChargeEwald::setError, (py::arg("error"), py::arg("rCut")=-1, py::arg("interpolation_order")=3) ) .def("setLongRangeInterval", &FixChargeEwald::setLongRangeInterval, (py::arg("interval")=0)) ; }
6ece75ff14ba962e7e72c707c901f8f4127e5eb1.cu
#include "FixChargeEwald.h" #include "BoundsGPU.h" #include "cutils_func.h" #include "cutils_math.h" #include "GridGPU.h" #include "State.h" #include <cufft.h> #include "globalDefs.h" #include <fstream> #include "Virial.h" #include "helpers.h" #include "PairEvaluatorNone.h" #include "EvaluatorWrapper.h" // #include <cmath> using namespace std; namespace py = boost::python; const std::string chargeEwaldType = "ChargeEwald"; // #define THREADS_PER_BLOCK_ // MW: Note that this function is a verbatim copy of that which appears in GridGPU.cu // consider combining __global__ void computeCentroid(float4 *centroids, float4 *xs, int nAtoms, int nPerRingPoly, BoundsGPU bounds) { int idx = GETIDX(); int nRingPoly = nAtoms / nPerRingPoly; if (idx < nRingPoly) { int baseIdx = idx * nPerRingPoly; float3 init = make_float3(xs[baseIdx]); float3 diffSum = make_float3(0, 0, 0); for (int i=baseIdx+1; i<baseIdx + nPerRingPoly; i++) { float3 next = make_float3(xs[i]); float3 dx = bounds.minImage(next - init); diffSum += dx; } diffSum /= nPerRingPoly; float3 unwrappedPos = init + diffSum; float3 trace = bounds.trace(); float3 diffFromLo = unwrappedPos - bounds.lo; float3 imgs = floorf(diffFromLo / trace); //are unskewed at this point float3 wrappedPos = unwrappedPos - trace * imgs * bounds.periodic; centroids[idx] = make_float4(wrappedPos); } } // MW: This is a duplicated function from GridGPU.cu __global__ void periodicWrapCpy(float4 *xs, int nAtoms, BoundsGPU bounds) { int idx = GETIDX(); if (idx < nAtoms) { float4 pos = xs[idx]; float id = pos.w; float3 trace = bounds.trace(); float3 diffFromLo = make_float3(pos) - bounds.lo; float3 imgs = floorf(diffFromLo / trace); //are unskewed at this point pos -= make_float4(trace * imgs * bounds.periodic); pos.w = id; //if (not(pos.x==orig.x and pos.y==orig.y and pos.z==orig.z)) { //sigh if (imgs.x != 0 or imgs.y != 0 or imgs.z != 0) { xs[idx] = pos; } } } //different implementation for different interpolation orders //TODO template //order 1 nearest point __global__ void map_charge_to_grid_order_1_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float *qs, BoundsGPU bounds, int3 sz,float *grid/*convert to float for cufffComplex*/,float Qunit) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; float qi = Qunit*qs[idx * nPerRingPoly]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //or int3 p=nearest_grid_point; if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; atomicAdd(&grid[p.x*sz.y*sz.z*2+p.y*sz.z*2+p.z*2], 1.0*qi); } } inline __host__ __device__ float W_p_3(int i,float x){ if (i==-1) return 0.125-0.5*x+0.5*x*x; if (i== 0) return 0.75-x*x; /*if (i== 1)*/ return 0.125+0.5*x+0.5*x*x; } __global__ void map_charge_to_grid_order_3_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float *qs, BoundsGPU bounds, int3 sz,float *grid/*convert to float for cufffComplex*/,float Qunit) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; float qi = Qunit*qs[idx * nPerRingPoly]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //distance from nearest_grid_point /h float3 d=pos/h-make_float3(nearest_grid_point); int3 p=nearest_grid_point; 
for (int ix=-1;ix<=1;ix++){ p.x=nearest_grid_point.x+ix; float charge_yz_w=qi*W_p_3(ix,d.x); for (int iy=-1;iy<=1;iy++){ p.y=nearest_grid_point.y+iy; float charge_z_w=charge_yz_w*W_p_3(iy,d.y); for (int iz=-1;iz<=1;iz++){ p.z=nearest_grid_point.z+iz; float charge_w=charge_z_w*W_p_3(iz,d.z); if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; if ((p.x<0) or (p.x>sz.x-1)) printf("grid point miss x %d, %d, %d, %f \n", idx,p.x,nearest_grid_point.x,pos.x); if ((p.y<0) or (p.y>sz.y-1)) printf("grid point miss y %d, %d, %d, %f \n", idx,p.y,nearest_grid_point.y,pos.y); if ((p.z<0) or (p.z>sz.z-1)) printf("grid point miss z %d, %d, %d, %f \n", idx,p.z,nearest_grid_point.z,pos.z); atomicAdd(&grid[p.x*sz.y*sz.z*2+p.y*sz.z*2+p.z*2], charge_w); } } } } } __global__ void map_charge_set_to_zero_cu(int3 sz,cufftComplex *grid) { int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)) grid[id.x*sz.y*sz.z+id.y*sz.z+id.z]=make_cuComplex (0.0f, 0.0f); } __device__ float sinc(float x){ if ((x<0.1)&&(x>-0.1)){ float x2=x*x; return 1.0 - x2*0.16666666667f + x2*x2*0.008333333333333333f - x2*x2*x2*0.00019841269841269841f; } else return sin(x)/x; } __global__ void Green_function_cu(BoundsGPU bounds, int3 sz,float *Green_function,float alpha, //now some parameter for Gf calc int sum_limits, int intrpl_order) { int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ float3 h =bounds.trace()/make_float3(sz); // 2*PI float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; //OK GF(k) = 4Pi/K^2 [SumforM(W(K+M)^2 exp(-(K+M)^2/4alpha) dot(K,K+M)/(K+M^2))] / // [SumforM^2(W(K+M)^2)] float sum1=0.0f; float sum2=0.0f; float k2=lengthSqr(k); float Fouralpha2inv=0.25/alpha/alpha; if (k2!=0.0){ for (int ix=-sum_limits;ix<=sum_limits;ix++){//TODO different limits for (int iy=-sum_limits;iy<=sum_limits;iy++){ for (int iz=-sum_limits;iz<=sum_limits;iz++){ float3 kpM=k+6.28318530717958647693f*make_float3(ix,iy,iz)/h; // kpM.x+=6.28318530717958647693f/h.x*ix;//TODO rewrite // kpM.y+=6.28318530717958647693f/h.y*iy; // kpM.z+=6.28318530717958647693f/h.z*iz; float kpMlen=lengthSqr(kpM); float W=sinc(kpM.x*h.x*0.5)*sinc(kpM.y*h.y*0.5)*sinc(kpM.z*h.z*0.5); // for(int p=1;p<intrpl_order;p++) // W*=W; // W*=h;//not need- cancels out // float W2=W*W; float W2=pow(W,intrpl_order*2); //4*PI sum1+=12.56637061435917295385*exp(-kpMlen*Fouralpha2inv)*dot(k,kpM)/kpMlen*W2; sum2+=W2; } } } Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]=sum1/(sum2*sum2)/k2; }else{ Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]=0.0f; } } } __global__ void potential_cu(int3 sz,float *Green_function, cufftComplex *FFT_qs, cufftComplex *FFT_phi){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ 
FFT_phi[id.x*sz.y*sz.z+id.y*sz.z+id.z]=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; //TODO after Inverse FFT divide by volume } } __global__ void E_field_cu(BoundsGPU bounds, int3 sz,float *Green_function, cufftComplex *FFT_qs, cufftComplex *FFT_Ex,cufftComplex *FFT_Ey,cufftComplex *FFT_Ez){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ //K vector float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; //ik*q(k)*Gf(k) cufftComplex Ex,Ey,Ez; float GF=Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; cufftComplex q=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; Ex.y= k.x*q.x*GF; Ex.x=-k.x*q.y*GF; Ey.y= k.y*q.x*GF; Ey.x=-k.y*q.y*GF; Ez.y= k.z*q.x*GF; Ez.x=-k.z*q.y*GF; FFT_Ex[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ex; FFT_Ey[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ey; FFT_Ez[id.x*sz.y*sz.z+id.y*sz.z+id.z]=Ez; //TODO after Inverse FFT divide by -volume } } __global__ void Ewald_long_range_forces_order_1_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float4 *fs, float *qs, BoundsGPU bounds, int3 sz, cufftComplex *FFT_Ex, cufftComplex *FFT_Ey,cufftComplex *FFT_Ez,float Qunit, bool storeForces, uint *ids, float4 *storedForces) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole= xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; int baseIdx = idx*nPerRingPoly; float qi = qs[baseIdx]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); int3 p=nearest_grid_point; if (p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; //get E field float3 E; float volume=bounds.trace().x*bounds.trace().y*bounds.trace().z; E.x= -FFT_Ex[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E.y= -FFT_Ey[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E.z= -FFT_Ez[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; // Apply force on centroid to all time slices for given atom float3 force= Qunit*qi*E; for (int i = 0; i< nPerRingPoly; i++) { fs[baseIdx + i] += force; } if (storeForces) { for (int i = 0; i < nPerRingPoly; i++) { storedForces[ids[baseIdx+i]] = make_float4(force.x, force.y, force.z, 0); } } } } __global__ void Ewald_long_range_forces_order_3_cu(int nRingPoly, int nPerRingPoly, float4 *xs, float4 *fs, float *qs, BoundsGPU bounds, int3 sz, cufftComplex *FFT_Ex, cufftComplex *FFT_Ey,cufftComplex *FFT_Ez,float Qunit, bool storeForces, uint *ids, float4 *storedForces) { int idx = GETIDX(); if (idx < nRingPoly) { float4 posWhole= xs[idx]; float3 pos = make_float3(posWhole)-bounds.lo; int baseIdx = idx*nPerRingPoly; float qi = qs[baseIdx]; //find nearest grid point float3 h=bounds.trace()/make_float3(sz); int3 nearest_grid_point=make_int3((pos+0.5*h)/h); //distance from nearest_grid_point /h float3 d=pos/h-make_float3(nearest_grid_point); float3 E=make_float3(0,0,0); float volume=bounds.trace().x*bounds.trace().y*bounds.trace().z; int3 p=nearest_grid_point; for (int ix=-1;ix<=1;ix++){ p.x=nearest_grid_point.x+ix; for (int iy=-1;iy<=1;iy++){ p.y=nearest_grid_point.y+iy; for (int iz=-1;iz<=1;iz++){ p.z=nearest_grid_point.z+iz; if 
(p.x>0) p.x-=int(p.x/sz.x)*sz.x; if (p.y>0) p.y-=int(p.y/sz.y)*sz.y; if (p.z>0) p.z-=int(p.z/sz.z)*sz.z; if (p.x<0) p.x-=int((p.x+1)/sz.x-1)*sz.x; if (p.y<0) p.y-=int((p.y+1)/sz.y-1)*sz.y; if (p.z<0) p.z-=int((p.z+1)/sz.z-1)*sz.z; float3 Ep; float W_xyz=W_p_3(ix,d.x)*W_p_3(iy,d.y)*W_p_3(iz,d.z); Ep.x= -FFT_Ex[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; Ep.y= -FFT_Ey[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; Ep.z= -FFT_Ez[p.x*sz.y*sz.z+p.y*sz.z+p.z].x/volume; E+=W_xyz*Ep; } } } float3 force= Qunit*qi*E; // Apply force on centroid to all time slices for given atom for (int i = 0; i < nPerRingPoly; i++) { fs[baseIdx + i] += force; } if (storeForces) { for (int i = 0; i < nPerRingPoly; i++) { storedForces[ids[baseIdx+i]] = make_float4(force.x, force.y, force.z, 0); } } } } __global__ void Energy_cu(int3 sz,float *Green_function, cufftComplex *FFT_qs, cufftComplex *E_grid){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ cufftComplex qi=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; E_grid[id.x*sz.y*sz.z+id.y*sz.z+id.z] =make_cuComplex((qi.x*qi.x+qi.y*qi.y)*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z],0.0); //TODO after Inverse FFT divide by volume } } __global__ void virials_cu(BoundsGPU bounds,int3 sz,Virial *dest,float alpha, float *Green_function,cufftComplex *FFT_qs,int warpSize){ int3 id = make_int3( blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if ((id.x<sz.x)&&(id.y<sz.y)&&(id.z<sz.z)){ float3 k= 6.28318530717958647693f*make_float3(id)/bounds.trace(); if (id.x>sz.x/2) k.x= 6.28318530717958647693f*(id.x-sz.x)/bounds.trace().x; if (id.y>sz.y/2) k.y= 6.28318530717958647693f*(id.y-sz.y)/bounds.trace().y; if (id.z>sz.z/2) k.z= 6.28318530717958647693f*(id.z-sz.z)/bounds.trace().z; float klen=lengthSqr(k); cufftComplex qi=FFT_qs[id.x*sz.y*sz.z+id.y*sz.z+id.z]; float E=(qi.x*qi.x+qi.y*qi.y)*Green_function[id.x*sz.y*sz.z+id.y*sz.z+id.z]; float differential=-2.0*(1.0/klen+0.25/(alpha*alpha)); if (klen==0.0) {differential=0.0;E=0.0;} Virial virialstmp = Virial(0, 0, 0, 0, 0, 0); virialstmp[0]=(1.0+differential*k.x*k.x)*E; //xx virialstmp[1]=(1.0+differential*k.y*k.y)*E; //yy virialstmp[2]=(1.0+differential*k.z*k.z)*E; //zz virialstmp[3]=(differential*k.x*k.y)*E; //xy virialstmp[4]=(differential*k.x*k.z)*E; //xz virialstmp[5]=(differential*k.y*k.z)*E; //yz // virials[id.x*sz.y*sz.z+id.y*sz.z+id.z]=virialstmp; // __syncthreads(); extern __shared__ Virial tmpV[]; // const int copyBaseIdx = blockDim.x*blockIdx.x * N_DATA_PER_THREAD + threadIdx.x; // const int copyIncrement = blockDim.x; tmpV[threadIdx.x*blockDim.y*blockDim.z+threadIdx.y*blockDim.z+threadIdx.z]=virialstmp; int curLookahead=1; int numLookaheadSteps = log2f(blockDim.x*blockDim.y*blockDim.z-1); const int sumBaseIdx = threadIdx.x*blockDim.y*blockDim.z+threadIdx.y*blockDim.z+threadIdx.z; __syncthreads(); for (int i=0; i<=numLookaheadSteps; i++) { if (! 
(sumBaseIdx % (curLookahead*2))) { tmpV[sumBaseIdx] += tmpV[sumBaseIdx + curLookahead]; } curLookahead *= 2; // if (curLookahead >= (warpSize)) {//Doesn't work in 3D case __syncthreads(); // } } if (sumBaseIdx == 0) { atomicAdd(&(dest[0].vals[0]), tmpV[0][0]); atomicAdd(&(dest[0].vals[1]), tmpV[0][1]); atomicAdd(&(dest[0].vals[2]), tmpV[0][2]); atomicAdd(&(dest[0].vals[3]), tmpV[0][3]); atomicAdd(&(dest[0].vals[4]), tmpV[0][4]); atomicAdd(&(dest[0].vals[5]), tmpV[0][5]); } } } #define N_DATA_PER_THREAD 4 //just taken from cutils_func.h __global__ void sum_virials_cu(Virial *dest, Virial *src, int n, int warpSize){ extern __shared__ Virial tmpV[]; const int copyBaseIdx = blockDim.x*blockIdx.x * N_DATA_PER_THREAD + threadIdx.x; const int copyIncrement = blockDim.x; for (int i=0; i<N_DATA_PER_THREAD; i++) { int step = i * copyIncrement; if (copyBaseIdx + step < n) { tmpV[threadIdx.x + step] = src[copyBaseIdx + step]; } else { tmpV[threadIdx.x + step] =Virial(0, 0, 0, 0, 0, 0); } } int curLookahead = N_DATA_PER_THREAD; int numLookaheadSteps = log2f(blockDim.x-1); const int sumBaseIdx = threadIdx.x * N_DATA_PER_THREAD; __syncthreads(); for (int i=sumBaseIdx+1; i<sumBaseIdx + N_DATA_PER_THREAD; i++) { tmpV[sumBaseIdx] += tmpV[i]; } for (int i=0; i<=numLookaheadSteps; i++) { if (! (sumBaseIdx % (curLookahead*2))) { tmpV[sumBaseIdx] += tmpV[sumBaseIdx + curLookahead]; } curLookahead *= 2; if (curLookahead >= (N_DATA_PER_THREAD * warpSize)) { __syncthreads(); } } if (threadIdx.x == 0) { atomicAdd(&(dest[0].vals[0]), tmpV[0][0]); atomicAdd(&(dest[0].vals[1]), tmpV[0][1]); atomicAdd(&(dest[0].vals[2]), tmpV[0][2]); atomicAdd(&(dest[0].vals[3]), tmpV[0][3]); atomicAdd(&(dest[0].vals[4]), tmpV[0][4]); atomicAdd(&(dest[0].vals[5]), tmpV[0][5]); } } /* template < bool COMPUTE_VIRIALS> __global__ void compute_short_range_forces_cu(int nAtoms, float4 *xs, float4 *fs, uint16_t *neighborCounts, uint *neighborlist, uint32_t *cumulSumMaxPerBlock, float *qs, float alpha, float rCut, BoundsGPU bounds, int warpSize, float onetwoStr, float onethreeStr, float onefourStr, Virial *__restrict__ virials, Virial *virialField, float volume,float conversion) { float multipliers[4] = {1, onetwoStr, onethreeStr, onefourStr}; // printf("USING SHORT RANGE FORCES IN VIRIAL. THIS KERNEL IS INCORRECT\n"); Virial virialsSum = Virial(0, 0, 0, 0, 0, 0); int idx = GETIDX(); if (idx < nAtoms) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole); float3 forceSum = make_float3(0, 0, 0); float qi = qs[idx]; int baseIdx = baseNeighlistIdx(cumulSumMaxPerBlock, warpSize); int numNeigh = neighborCounts[idx]; for (int i=0; i<numNeigh; i++) { int nlistIdx = baseIdx + warpSize * i; uint otherIdxRaw = neighborlist[nlistIdx]; uint neighDist = otherIdxRaw >> 30; uint otherIdx = otherIdxRaw & EXCL_MASK; float3 otherPos = make_float3(xs[otherIdx]); //then wrap and compute forces! 
float3 dr = bounds.minImage(pos - otherPos); float lenSqr = lengthSqr(dr); // printf("dist is %f %f %f\n", dr.x, dr.y, dr.z); if (lenSqr < rCut*rCut) { float multiplier = multipliers[neighDist]; if (multiplier) { float len=sqrtf(lenSqr); float qj = qs[otherIdx]; float r2inv = 1.0f/lenSqr; float rinv = 1.0f/len; //1/Sqrt(Pi) float forceScalar = conversion*qi*qj*(erfcf((alpha*len))*rinv+(2.0*0.5641895835477563*alpha)*exp(-alpha*alpha*lenSqr))*r2inv* multiplier; float3 forceVec = dr * forceScalar; forceSum += forceVec; // if ((::isnan(forceScalar)) or (abs(forceScalar)>1E6)) printf("short ewald nan %f ,%d ,%d %f \n", forceScalar,idx, otherIdx,pos.x); if (COMPUTE_VIRIALS) { computeVirial(virialsSum, forceVec, dr); } } } } fs[idx] += forceSum; //operator for float4 + float3 if (COMPUTE_VIRIALS) { //printf("vir %f %f %f %f %f %f\n", virialsSum.vals[0], virialsSum.vals[1], virialsSum.vals[2], virial_per_particle.vals[0],virial_per_particle.vals[1],virial_per_particle.vals[2]); Virial field = virialField[0]; field /= (nAtoms * volume); virialsSum+=field; virials[idx] += virialsSum; } } } */ /* __global__ void compute_short_range_energies_cu(int nAtoms, float4 *xs, uint16_t *neighborCounts, uint *neighborlist, uint32_t *cumulSumMaxPerBlock, float *qs, float alpha, float rCut, BoundsGPU bounds, int warpSize, float onetwoStr, float onethreeStr, float onefourStr,float *perParticleEng, float field_energy_per_particle,float conversion) { float multipliers[4] = {1, onetwoStr, onethreeStr, onefourStr}; int idx = GETIDX(); if (idx < nAtoms) { float4 posWhole = xs[idx]; float3 pos = make_float3(posWhole); float EngSum = 0.0f; float qi = qs[idx]; int baseIdx = baseNeighlistIdx(cumulSumMaxPerBlock, warpSize); int numNeigh = neighborCounts[idx]; for (int i=0; i<numNeigh; i++) { int nlistIdx = baseIdx + warpSize * i; uint otherIdxRaw = neighborlist[nlistIdx]; uint neighDist = otherIdxRaw >> 30; uint otherIdx = otherIdxRaw & EXCL_MASK; float3 otherPos = make_float3(xs[otherIdx]); //then wrap and compute forces! float3 dr = bounds.minImage(pos - otherPos); float lenSqr = lengthSqr(dr); // printf("dist is %f %f %f\n", dr.x, dr.y, dr.z); if (lenSqr < rCut*rCut) { float multiplier = multipliers[neighDist]; if (multiplier) { float len=sqrtf(lenSqr); float qj = qs[otherIdx]; // float r2inv = 1.0f/lenSqr; float rinv = 1.0f/len; float eng = conversion*0.5*qi*qj*(erfcf((alpha*len))*rinv)*multiplier; EngSum += eng; } } } perParticleEng[idx] += EngSum+field_energy_per_particle; } } */ __global__ void applyStoredForces(int nAtoms, float4 *fs, uint *ids, float4 *fsStored) { int idx = GETIDX(); if (idx < nAtoms) { float4 cur = fs[idx]; float3 stored = make_float3(fsStored[ids[idx]]); cur += stored; fs[idx] = cur; } } __global__ void mapVirialToSingleAtom(Virial *atomVirials, Virial *fieldVirial, float volume) { //just mapping to one atom for now. 
If we're looking at per-atom properties, should change to mapping to all atoms evenly atomVirials[0][threadIdx.x] += 0.5 * fieldVirial[0][threadIdx.x] / volume; } __global__ void mapEngToParticles(int nAtoms, float eng, float *engs) { int idx = GETIDX(); if (idx < nAtoms) { engs[idx] += eng; } } FixChargeEwald::FixChargeEwald(SHARED(State) state_, string handle_, string groupHandle_): FixCharge(state_, handle_, groupHandle_, chargeEwaldType, true){ cufftCreate(&plan); canOffloadChargePairCalc = true; modeIsError = false; sz = make_int3(32, 32, 32); malloced = false; longRangeInterval = 1; setEvalWrapper(); } FixChargeEwald::~FixChargeEwald(){ cufftDestroy(plan); if (malloced) { cudaFree(FFT_Qs); cudaFree(FFT_Ex); cudaFree(FFT_Ey); cudaFree(FFT_Ez); } } //Root mean square force error estimation const double amp_table[][7] = { {2.0/3.0, 0, 0, 0, 0, 0, 0}, {1.0/50.0, 5.0/294.0, 0, 0, 0, 0, 0}, {1.0/588.0, 7.0/1440.0, 21.0/3872.0, 0, 0, 0, 0}, {1.0/4320.0, 3.0/1936.0, 7601.0/2271360.0, 143.0/28800.0, 0, 0, 0}, {1.0/23232.0, 7601.0/12628160.0, 143.0/69120.0, 517231.0/106536960.0, 106640677.0/11737571328.0, 0, 0}, {691.0/68140800.0, 13.0/57600.0, 47021.0/35512320.0, 9694607.0/2095994880.0, 733191589.0/59609088000.0, 326190917.0/11700633600.0, 0}, {1.0/345600.0, 3617.0/35512320.0, 745739.0/838397952.0, 56399353.0/12773376000.0, 25091609.0/1560084480.0, 1755948832039.0/36229939200000.0, 48887769399.0/37838389248.0} }; double FixChargeEwald :: DeltaF_k(double t_alpha){ int nAtoms = state->atoms.size(); double sumx=0.0,sumy=0.0,sumz=0.0; for( int m=0;m<interpolation_order;m++){ double amp=amp_table[interpolation_order-1][m]; sumx+=amp*pow(h.x*t_alpha,2*m); sumy+=amp*pow(h.y*t_alpha,2*m); sumz+=amp*pow(h.z*t_alpha,2*m); } return total_Q2/3.0*(1.0/(L.x*L.x)*pow(t_alpha*h.x,interpolation_order)*sqrt(t_alpha*L.x/nAtoms*sqrt(2.0*M_PI)*sumx)+ 1.0/(L.y*L.y)*pow(t_alpha*h.y,interpolation_order)*sqrt(t_alpha*L.y/nAtoms*sqrt(2.0*M_PI)*sumy)+ 1.0/(L.z*L.z)*pow(t_alpha*h.z,interpolation_order)*sqrt(t_alpha*L.z/nAtoms*sqrt(2.0*M_PI)*sumz)); } double FixChargeEwald :: DeltaF_real(double t_alpha){ int nAtoms = state->atoms.size(); return 2*total_Q2/sqrt(nAtoms*r_cut*L.x*L.y*L.z)*exp(-t_alpha*t_alpha*r_cut*r_cut); } void FixChargeEwald::setTotalQ2() { int nAtoms = state->atoms.size(); GPUArrayGlobal<float>tmp(1); tmp.memsetByVal(0.0); float conversion = state->units.qqr_to_eng; accumulate_gpu<float,float, SumSqr, N_DATA_PER_THREAD> <<<NBLOCK(nAtoms/(double)N_DATA_PER_THREAD),PERBLOCK,N_DATA_PER_THREAD*sizeof(float)*PERBLOCK>>> ( tmp.getDevData(), state->gpd.qs(state->gpd.activeIdx()), nAtoms, state->devManager.prop.warpSize, SumSqr()); tmp.dataToHost(); total_Q2=conversion*tmp.h_data[0]/state->nPerRingPoly; tmp.memsetByVal(0.0); accumulate_gpu<float,float, SumSingle, N_DATA_PER_THREAD> <<<NBLOCK(nAtoms/(double)N_DATA_PER_THREAD),PERBLOCK,N_DATA_PER_THREAD*sizeof(float)*PERBLOCK>>> ( tmp.getDevData(), state->gpd.qs(state->gpd.activeIdx()), nAtoms, state->devManager.prop.warpSize, SumSingle()); tmp.dataToHost(); total_Q=sqrt(conversion)*tmp.h_data[0]/state->nPerRingPoly; cout<<"total_Q "<<total_Q<<'\n'; cout<<"total_Q2 "<<total_Q2<<'\n'; } double FixChargeEwald::find_optimal_parameters(bool printError){ int nAtoms = state->atoms.size(); L=state->boundsGPU.trace(); h=make_float3(L.x/sz.x,L.y/sz.y,L.z/sz.z); // cout<<"Lx "<<L.x<<'\n'; // cout<<"hx "<<h.x<<'\n'; // cout<<"nA "<<nAtoms<<'\n'; //now root solver //solving DeltaF_k=DeltaF_real // Log(DeltaF_k)=Log(DeltaF_real) // Log(DeltaF_k)-Log(DeltaF_real)=0 //lets try 
secant //two initial points double x_a=0.0; double x_b=4.79853/r_cut; double y_a=DeltaF_k(x_a)-DeltaF_real(x_a); double y_b=DeltaF_k(x_b)-DeltaF_real(x_b); // cout<<x_a<<' '<<y_a<<'\n'; // cout<<x_b<<' '<<y_b<<' '<<DeltaF_real(x_b)<<'\n'; double tol=1E-5; int n_iter=0,max_iter=100; while((fabs(y_b)/DeltaF_real(x_b)>tol)&&(n_iter<max_iter)){ double kinv=(x_b-x_a)/(y_b-y_a); y_a=y_b; x_a=x_b; x_b=x_a-y_a*kinv; y_b=DeltaF_k(x_b)-DeltaF_real(x_b); // cout<<x_b<<' '<<y_b<<'\n'; n_iter++; } if (n_iter==max_iter) cout<<"Ewald RMS Root finder failed, max_iter "<<max_iter<<" reached\n"; alpha=x_b; setEvalWrapper(); //set orig! //alpha = 1.0; double error = DeltaF_k(alpha)+DeltaF_real(alpha); if (printError) { cout<<"Ewald alpha="<<alpha<<'\n'; cout<<"Ewald RMS error is "<<error<<'\n'; } return error; } void FixChargeEwald::setParameters(int szx_,int szy_,int szz_,float rcut_,int interpolation_order_) { //for now support for only 2^N sizes //TODO generalize for non cubic boxes if (rcut_==-1) { rcut_ = state->rCut; } if ((szx_!=32)&&(szx_!=64)&&(szx_!=128)&&(szx_!=256)&&(szx_!=512)&&(szx_!=1024)){ cout << szx_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } if ((szy_!=32)&&(szy_!=64)&&(szy_!=128)&&(szy_!=256)&&(szy_!=512)&&(szy_!=1024)){ cout << szy_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } if ((szz_!=32)&&(szz_!=64)&&(szz_!=128)&&(szz_!=256)&&(szz_!=512)&&(szz_!=1024)){ cout << szz_ << " is not supported, sorry. Only 2^N grid size works for charge Ewald\n"; exit(2); } sz=make_int3(szx_,szy_,szz_); r_cut=rcut_; cudaMalloc((void**)&FFT_Qs, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cufftPlan3d(&plan, sz.x,sz.y, sz.z, CUFFT_C2C); cudaMalloc((void**)&FFT_Ex, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cudaMalloc((void**)&FFT_Ey, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cudaMalloc((void**)&FFT_Ez, sizeof(cufftComplex)*sz.x*sz.y*sz.z); Green_function=GPUArrayGlobal<float>(sz.x*sz.y*sz.z); CUT_CHECK_ERROR("setParameters execution failed"); interpolation_order=interpolation_order_; malloced = true; } void FixChargeEwald::setGridToErrorTolerance(bool printMsg) { int3 szOld = sz; int nTries = 0; double error = find_optimal_parameters(false); Vector trace = state->bounds.rectComponents; while (nTries < 100 and (error > errorTolerance or error!=error or error < 0)) { //<0 tests for -inf Vector sVec = Vector(make_float3(sz)); Vector ratio = sVec / trace; double minRatio = ratio[0]; int minIdx = 0; for (int i=0; i<3; i++) { if (ratio[i] < minRatio) { minRatio = ratio[i]; minIdx = i; } } sVec[minIdx] *= 2; //sz *= 2;//make_int3(sVec.asFloat3()); sz = make_int3(sVec.asFloat3()); error = find_optimal_parameters(false); nTries++; } //DOESN'T REDUCE GRID SIZE EVER if (printMsg) { printf("Using ewald grid of %d %d %d with error %f\n", sz.x, sz.y, sz.z, error); } if (!malloced or szOld != sz) { if (malloced) { cufftDestroy(plan); cudaFree(FFT_Qs); cudaFree(FFT_Ex); cudaFree(FFT_Ey); cudaFree(FFT_Ez); } cudaMalloc((void**)&FFT_Qs, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cufftPlan3d(&plan, sz.x,sz.y, sz.z, CUFFT_C2C); cudaMalloc((void**)&FFT_Ex, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cudaMalloc((void**)&FFT_Ey, sizeof(cufftComplex)*sz.x*sz.y*sz.z); cudaMalloc((void**)&FFT_Ez, sizeof(cufftComplex)*sz.x*sz.y*sz.z); Green_function=GPUArrayGlobal<float>(sz.x*sz.y*sz.z); malloced = true; } } void FixChargeEwald::setError(double targetError, float rcut_, int interpolation_order_) { if (rcut_==-1) { rcut_ = state->rCut; } r_cut=rcut_; 
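    // Error-tolerance mode: the mesh dimensions are not chosen here; setGridToErrorTolerance()
    // grows the grid until the estimated RMS force error falls below targetError.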
interpolation_order=interpolation_order_; errorTolerance = targetError; modeIsError = true; } void FixChargeEwald::calc_Green_function(){ dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); int sum_limits=int(alpha*pow(h.x*h.y*h.z,1.0/3.0)/3.14159*(sqrt(-log(10E-7))))+1; Green_function_cu<<<dimGrid, dimBlock>>>(state->boundsGPU, sz,Green_function.getDevData(),alpha, sum_limits,interpolation_order);//TODO parameters unknown CUT_CHECK_ERROR("Green_function_cu kernel execution failed"); //test area // Green_function.dataToHost(); // ofstream ofs; // ofs.open("test_Green_function.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<Green_function.h_data[i*sz.y*sz.z+j*sz.z+k]<<'\t'; // ofs<<Green_function.h_data[i*sz.y*sz.z+j*sz.z+k]<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } // ofs.close(); } void FixChargeEwald::calc_potential(cufftComplex *phi_buf){ BoundsGPU b=state->boundsGPU; float volume=b.volume(); dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); potential_cu<<<dimGrid, dimBlock>>>(sz,Green_function.getDevData(), FFT_Qs,phi_buf); CUT_CHECK_ERROR("potential_cu kernel execution failed"); cufftExecC2C(plan, phi_buf, phi_buf, CUFFT_INVERSE); cudaDeviceSynchronize(); CUT_CHECK_ERROR("cufftExecC2C execution failed"); // //test area // float *buf=new float[sz.x*sz.y*sz.z*2]; // cudaMemcpy((void *)buf,phi_buf,sizeof(cufftComplex)*sz.x*sz.y*sz.z,cudaMemcpyDeviceToHost ); // ofstream ofs; // ofs.open("test_phi.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; // ofs<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } // ofs.close(); // delete []buf; } bool FixChargeEwald::prepareForRun() { virialField = GPUArrayDeviceGlobal<Virial>(1); setTotalQ2(); if ((state->boundsGPU != boundsLastOptimize)||(total_Q2!=total_Q2LastOptimize)) { handleBoundsChangeInternal(true); } turnInit = state->turn; if (longRangeInterval != 1) { storedForces = GPUArrayDeviceGlobal<float4>(state->maxIdExisting+1); } else { storedForces = GPUArrayDeviceGlobal<float4>(1); } if (state->nPerRingPoly > 1) { rpCentroids = GPUArrayDeviceGlobal<float4>(state->atoms.size() / state->nPerRingPoly); } setEvalWrapper(); return true; } void FixChargeEwald::setEvalWrapper() { if (evalWrapperMode == "offload") { if (hasOffloadedChargePairCalc) { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), nullptr); //nParams arg is 1 rather than zero b/c can't have zero sized argument on device } else { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), this); } } else if (evalWrapperMode == "self") { evalWrap = pickEvaluator<EvaluatorNone, 1, false>(EvaluatorNone(), this); } } void FixChargeEwald::handleBoundsChange() { handleBoundsChangeInternal(false); } void FixChargeEwald::handleBoundsChangeInternal(bool printError) { if ((state->boundsGPU != boundsLastOptimize)||(total_Q2!=total_Q2LastOptimize)) { if (modeIsError) { setGridToErrorTolerance(printError); } else { find_optimal_parameters(printError); } calc_Green_function(); boundsLastOptimize = state->boundsGPU; total_Q2LastOptimize=total_Q2; } } void FixChargeEwald::compute(int virialMode) { // CUT_CHECK_ERROR("before FixChargeEwald kernel execution failed"); // 
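    // k-space part of the Ewald sum: map charges (ring-polymer centroids when nPerRingPoly > 1)
    // onto the mesh, forward-FFT the charge density, multiply by the Green function to get the
    // k-space field, inverse-FFT Ex/Ey/Ez, and interpolate forces back onto the particles.  This
    // is redone every longRangeInterval steps; on the other steps the stored forces are reapplied.
    // The real-space (short-range) part is evaluated afterwards through evalWrap.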
cout<<"FixChargeEwald::compute..\n"; int nAtoms = state->atoms.size(); int nPerRingPoly = state->nPerRingPoly; int nRingPoly = nAtoms / nPerRingPoly; GPUData &gpd = state->gpd; GridGPU &grid = state->gridGPU; int activeIdx = gpd.activeIdx(); uint16_t *neighborCounts = grid.perAtomArray.d_data.data(); float Qconversion = sqrt(state->units.qqr_to_eng); //first update grid from atoms positions //set qs to 0 dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); if (not ((state->turn - turnInit) % longRangeInterval)) { map_charge_set_to_zero_cu<<<dimGrid, dimBlock>>>(sz,FFT_Qs); // CUT_CHECK_ERROR("map_charge_set_to_zero_cu kernel execution failed"); //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Compute centroids of all ring polymers for use on grid //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ float4 *centroids; BoundsGPU bounds = state->boundsGPU; BoundsGPU boundsUnskewed = bounds.unskewed(); if (nPerRingPoly >1) { computeCentroid<<<NBLOCK(nRingPoly),PERBLOCK>>>(rpCentroids.data(),gpd.xs(activeIdx),nAtoms,nPerRingPoly,boundsUnskewed); centroids = rpCentroids.data(); } else { centroids = gpd.xs(activeIdx); } switch (interpolation_order){ case 1:{map_charge_to_grid_order_1_cu <<<NBLOCK(nRingPoly), PERBLOCK>>>( nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs, Qconversion); break;} case 3:{map_charge_to_grid_order_3_cu <<<NBLOCK(nRingPoly), PERBLOCK>>>(nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs, Qconversion); break;} } // CUT_CHECK_ERROR("map_charge_to_grid_cu kernel execution failed"); cufftExecC2C(plan, FFT_Qs, FFT_Qs, CUFFT_FORWARD); // cudaDeviceSynchronize(); // CUT_CHECK_ERROR("cufftExecC2C Qs execution failed"); // //test area // float buf[sz.x*sz.y*sz.z*2]; // cudaMemcpy(buf,FFT_Qs,sizeof(cufftComplex)*sz.x*sz.y*sz.z,cudaMemcpyDeviceToHost ); // ofstream ofs; // ofs.open("test_FFT.dat",ios::out ); // for(int i=0;i<sz.x;i++) // for(int j=0;j<sz.y;j++){ // for(int k=0;k<sz.z;k++){ // cout<<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]<<'\t'; // ofs <<buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]<<'\t'; // } // ofs<<'\n'; // cout<<'\n'; // } //next potential calculation: just going to use Ex to store it for now // calc_potential(FFT_Ex); //calc E field E_field_cu<<<dimGrid, dimBlock>>>(state->boundsGPU,sz,Green_function.getDevData(), FFT_Qs,FFT_Ex,FFT_Ey,FFT_Ez); CUT_CHECK_ERROR("E_field_cu kernel execution failed"); cufftExecC2C(plan, FFT_Ex, FFT_Ex, CUFFT_INVERSE); cufftExecC2C(plan, FFT_Ey, FFT_Ey, CUFFT_INVERSE); cufftExecC2C(plan, FFT_Ez, FFT_Ez, CUFFT_INVERSE); // cudaDeviceSynchronize(); // CUT_CHECK_ERROR("cufftExecC2C E_field execution failed"); /*//test area Bounds b=state->bounds; float volume=b.trace[0]*b.trace[1]*b.trace[2]; float *buf=new float[sz.x*sz.y*sz.z*2]; cudaMemcpy((void *)buf,FFT_Ex,sizeof(cufftComplex)*sz.x*sz.y*sz.z,cudaMemcpyDeviceToHost ); ofstream ofs; ofs.open("test_Ex.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; cout<<'\n'; } ofs.close(); cudaMemcpy((void *)buf,FFT_Ey,sizeof(cufftComplex)*sz.x*sz.y*sz.z,cudaMemcpyDeviceToHost ); ofs.open("test_Ey.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; 
ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; cout<<'\n'; } ofs.close(); cudaMemcpy((void *)buf,FFT_Ez,sizeof(cufftComplex)*sz.x*sz.y*sz.z,cudaMemcpyDeviceToHost ); ofs.open("test_Ez.dat",ios::out ); for(int i=0;i<sz.x;i++) for(int j=0;j<sz.y;j++){ for(int k=0;k<sz.z;k++){ cout<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; ofs<<-buf[i*sz.y*sz.z*2+j*sz.z*2+k*2]/volume<<'\t'; } ofs<<'\n'; cout<<'\n'; } ofs.close(); delete []buf; */ //calc forces //printf("Forces!\n"); // Performing an "effective" ring polymer contraction means that we should evaluate the forces // for the centroids bool storeForces = longRangeInterval != 1; switch (interpolation_order){ case 1:{Ewald_long_range_forces_order_1_cu<<<NBLOCK(nRingPoly), PERBLOCK>>>( nRingPoly, nPerRingPoly, centroids, gpd.fs(activeIdx), gpd.qs(activeIdx), state->boundsGPU, sz, FFT_Ex,FFT_Ey,FFT_Ez,Qconversion, storeForces, gpd.ids(activeIdx), storedForces.data() ); break;} case 3:{Ewald_long_range_forces_order_3_cu<<<NBLOCK(nRingPoly), PERBLOCK>>>( nRingPoly, nPerRingPoly, centroids, gpd.fs(activeIdx), gpd.qs(activeIdx), state->boundsGPU, sz, FFT_Ex,FFT_Ey,FFT_Ez,Qconversion, storeForces, gpd.ids(activeIdx), storedForces.data() ); break;} } } else { applyStoredForces<<<NBLOCK(nAtoms), PERBLOCK>>>( nAtoms, gpd.fs(activeIdx), gpd.ids(activeIdx), storedForces.data()); } CUT_CHECK_ERROR("Ewald_long_range_forces_cu execution failed"); //SHORT RANGE if (virialMode) { int warpSize = state->devManager.prop.warpSize; BoundsGPU &b=state->boundsGPU; float volume=b.volume(); virialField.memset(0); virials_cu<<<dimGrid, dimBlock,sizeof(Virial)*dimBlock.x*dimBlock.y*dimBlock.z>>>(state->boundsGPU,sz,virialField.data(),alpha,Green_function.getDevData(), FFT_Qs, warpSize); CUT_CHECK_ERROR("virials_cu kernel execution failed"); mapVirialToSingleAtom<<<1, 6>>>(gpd.virials.d_data.data(), virialField.data(), volume); } float *neighborCoefs = state->specialNeighborCoefs; evalWrap->compute(nAtoms,nPerRingPoly,gpd.xs(activeIdx), gpd.fs(activeIdx), neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(), state->devManager.prop.warpSize, nullptr, 0, state->boundsGPU, //PASSING NULLPTR TO GPU MAY CAUSE ISSUES //ALTERNATIVELy, COULD JUST GIVE THE PARMS SOME OTHER RANDOM POINTER, AS LONG AS IT'S VALID neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], gpd.virials.d_data.data(), gpd.qs(activeIdx), r_cut, virialMode, nThreadPerBlock(), nThreadPerAtom()); CUT_CHECK_ERROR("Ewald_short_range_forces_cu execution failed"); } void FixChargeEwald::singlePointEng(float * perParticleEng) { CUT_CHECK_ERROR("before FixChargeEwald kernel execution failed"); if (state->boundsGPU != boundsLastOptimize) { handleBoundsChange(); } // cout<<"FixChargeEwald::compute..\n"; int nAtoms = state->atoms.size(); int nPerRingPoly = state->nPerRingPoly; int nRingPoly = nAtoms / nPerRingPoly; GPUData &gpd = state->gpd; GridGPU &grid = state->gridGPU; int activeIdx = gpd.activeIdx(); uint16_t *neighborCounts = grid.perAtomArray.d_data.data(); float Qconversion = sqrt(state->units.qqr_to_eng); //first update grid from atoms positions //set qs to 0 float field_energy_per_particle = 0; dim3 dimBlock(8,8,8); dim3 dimGrid((sz.x + dimBlock.x - 1) / dimBlock.x,(sz.y + dimBlock.y - 1) / dimBlock.y,(sz.z + dimBlock.z - 1) / dimBlock.z); map_charge_set_to_zero_cu<<<dimGrid, dimBlock>>>(sz,FFT_Qs); CUT_CHECK_ERROR("map_charge_set_to_zero_cu kernel execution failed"); //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Compute centroids of all ring polymers for use 
on grid //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ float4 *centroids; BoundsGPU bounds = state->boundsGPU; BoundsGPU boundsUnskewed = bounds.unskewed(); if (nPerRingPoly >1) { rpCentroids = GPUArrayDeviceGlobal<float4>(nRingPoly); computeCentroid<<<NBLOCK(nRingPoly),PERBLOCK>>>(rpCentroids.data(),gpd.xs(activeIdx),nAtoms,nPerRingPoly,boundsUnskewed); centroids = rpCentroids.data(); periodicWrapCpy<<<NBLOCK(nRingPoly), PERBLOCK>>>(centroids, nRingPoly, boundsUnskewed); } else { centroids = gpd.xs(activeIdx); } switch (interpolation_order){ case 1:{map_charge_to_grid_order_1_cu <<<NBLOCK(nRingPoly), PERBLOCK>>>(nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs,Qconversion); break;} case 3:{map_charge_to_grid_order_3_cu <<<NBLOCK(nRingPoly), PERBLOCK>>>(nRingPoly, nPerRingPoly, centroids, gpd.qs(activeIdx), state->boundsGPU, sz, (float *)FFT_Qs,Qconversion); break;} } CUT_CHECK_ERROR("map_charge_to_grid_cu kernel execution failed"); cufftExecC2C(plan, FFT_Qs, FFT_Qs, CUFFT_FORWARD); cudaDeviceSynchronize(); CUT_CHECK_ERROR("cufftExecC2C Qs execution failed"); //calc field energy BoundsGPU &b=state->boundsGPU; float volume=b.volume(); Energy_cu<<<dimGrid, dimBlock>>>(sz,Green_function.getDevData(), FFT_Qs,FFT_Ex);//use Ex as buffer CUT_CHECK_ERROR("Energy_cu kernel execution failed"); GPUArrayGlobal<float>field_E(1); field_E.memsetByVal(0.0); int warpSize = state->devManager.prop.warpSize; accumulate_gpu<float,float, SumSingle, N_DATA_PER_THREAD> <<<NBLOCK(2*sz.x*sz.y*sz.z/(double)N_DATA_PER_THREAD),PERBLOCK,N_DATA_PER_THREAD*sizeof(float)*PERBLOCK>>> ( field_E.getDevData(), (float *)FFT_Ex, 2*sz.x*sz.y*sz.z, warpSize, SumSingle() ); /* sumSingle<float,float, N_DATA_PER_THREAD> <<<NBLOCK(2*sz.x*sz.y*sz.z/(double)N_DATA_PER_THREAD),PERBLOCK,N_DATA_PER_THREAD*sizeof(float)*PERBLOCK>>>( field_E.getDevData(), (float *)FFT_Ex, 2*sz.x*sz.y*sz.z, warpSize); */ field_E.dataToHost(); //field_energy_per_particle=0.5*field_E.h_data[0]/volume/nAtoms; field_energy_per_particle=0.5*field_E.h_data[0]/volume/nRingPoly; // cout<<"field_E "<<field_E.h_data[0]<<'\n'; field_energy_per_particle-=alpha/sqrt(M_PI)*total_Q2/nRingPoly; // cout<<"self correction "<<alpha/sqrt(M_PI)*total_Q2<<'\n'; //pair energies mapEngToParticles<<<NBLOCK(nAtoms), PERBLOCK>>>(nAtoms, field_energy_per_particle, perParticleEng); float *neighborCoefs = state->specialNeighborCoefs; evalWrap->energy(nAtoms,nPerRingPoly, gpd.xs(activeIdx), perParticleEng, neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(), state->devManager.prop.warpSize, nullptr, 0, state->boundsGPU, neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], gpd.qs(activeIdx), r_cut, nThreadPerBlock(), nThreadPerAtom()); CUT_CHECK_ERROR("Ewald_short_range_forces_cu execution failed"); } int FixChargeEwald::setLongRangeInterval(int interval) { if (interval) { longRangeInterval = interval; } return longRangeInterval; } ChargeEvaluatorEwald FixChargeEwald::generateEvaluator() { return ChargeEvaluatorEwald(alpha, state->units.qqr_to_eng); } void (FixChargeEwald::*setParameters_xyz)(int ,int ,int ,float ,int) = &FixChargeEwald::setParameters; void (FixChargeEwald::*setParameters_xxx)(int ,float ,int) = &FixChargeEwald::setParameters; void export_FixChargeEwald() { py::class_<FixChargeEwald, SHARED(FixChargeEwald), py::bases<FixCharge> > ( "FixChargeEwald", py::init<SHARED(State), string, string> ( py::args("state", "handle", "groupHandle")) ) .def("setParameters", setParameters_xyz, 
(py::arg("szx"),py::arg("szy"),py::arg("szz"), py::arg("r_cut")=-1,py::arg("interpolation_order")=3) ) .def("setParameters", setParameters_xxx, (py::arg("sz"),py::arg("r_cut")=-1,py::arg("interpolation_order")=3) ) .def("setError", &FixChargeEwald::setError, (py::arg("error"), py::arg("rCut")=-1, py::arg("interpolation_order")=3) ) .def("setLongRangeInterval", &FixChargeEwald::setLongRangeInterval, (py::arg("interval")=0)) ; }
e72dc233157857864e360dbbe211e9d22dc354ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cutil_inline.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cu" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while() __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, crunch); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot0_sm13<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS0_sm13), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 2: hipLaunchKernelGGL(( Mandelbrot0_sm13<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; } cutilCheckMsg("Mandelbrot0_sm13 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot1_sm13<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS1_sm13), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 2: hipLaunchKernelGGL(( Mandelbrot1_sm13<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; } cutilCheckMsg("Mandelbrot1_sm13 kernel execution failed.\n"); } // RunMandelbrot1 // check if we're running in emulation mode int inEmulationMode() { #ifdef __DEVICE_EMULATION__ return 1; #else return 0; #endif }
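// --- Usage sketch (not part of the original SDK sample; buffer and variable names are illustrative) ---
// `d_dst` must be a device buffer of imageW*imageH uchar4 pixels (the sample maps a GL pixel buffer
// object, but a plain hipMalloc'd buffer works for offline rendering).  `mode` selects the arithmetic:
// 0 = float, 1 = double-single, 2 = double.  `numSMs` sets the number of persistent worker blocks,
// typically multiProcessorCount from hipGetDeviceProperties.
//
//   uchar4 *d_dst = nullptr;
//   hipMalloc(&d_dst, imageW * imageH * sizeof(uchar4));
//   uchar4 colors = {3, 5, 7, 0};
//   RunMandelbrot0_sm13(d_dst, imageW, imageH, /*crunch=*/512,
//                       /*xOff=*/-0.5, /*yOff=*/0.0, /*scale=*/3.0 / imageW,
//                       colors, /*frame=*/0, /*animationFrame=*/0,
//                       /*mode=*/0, /*numSMs=*/props.multiProcessorCount);
// -------------------------------------------------------------------------------------------------------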
e72dc233157857864e360dbbe211e9d22dc354ae.cu
#include <stdio.h> #include "cutil_inline.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cu" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while() __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, crunch); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while(1) { if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif if (blockIndex >= numBlocks) break; // finish // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) count += CheckColors(pixelColor, dst[pixel - 1]); if (ix + 1 < imageW) count += CheckColors(pixelColor, dst[pixel + 1]); if (iy > 0) count += CheckColors(pixelColor, dst[pixel - imageW]); if (iy + 1 < imageH) count += CheckColors(pixelColor, dst[pixel + imageW]); if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, crunch); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: Mandelbrot0_sm13<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); MandelbrotDS0_sm13<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 2: Mandelbrot0_sm13<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; } cutilCheckMsg("Mandelbrot0_sm13 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1_sm13(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs) { dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); // zero block counter unsigned int hBlockCounter = 0; cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) ); int numWorkerBlocks = numSMs; switch(mode) { default: case 0: Mandelbrot1_sm13<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); MandelbrotDS1_sm13<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; case 2: Mandelbrot1_sm13<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y); break; } cutilCheckMsg("Mandelbrot1_sm13 kernel execution failed.\n"); } // RunMandelbrot1 // check if we're running in emulation mode int inEmulationMode() { #ifdef __DEVICE_EMULATION__ return 1; #else return 0; #endif }
c040706a0b564077157142f371db780fe2f21a29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrixblockdiagmultiply.kernel.h" __global__ void MatrixBlockDiagMultiplyKernel(Matrix A, Matrix B, Matrix C, int blocksize) { float sum = 0; int from, to; int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; from = blocksize * (j / blocksize); to = from + blocksize; for (int k = from; k < to; ++k) { sum += A.elements[i * A.cols + k] * B.elements[k * B.cols + j]; } C.elements[i * C.cols + j] = sum; }
c040706a0b564077157142f371db780fe2f21a29.cu
#include "matrixblockdiagmultiply.kernel.h" __global__ void MatrixBlockDiagMultiplyKernel(Matrix A, Matrix B, Matrix C, int blocksize) { float sum = 0; int from, to; int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; from = blocksize * (j / blocksize); to = from + blocksize; for (int k = from; k < to; ++k) { sum += A.elements[i * A.cols + k] * B.elements[k * B.cols + j]; } C.elements[i * C.cols + j] = sum; }
de2a819b0995e9ada7d3fb98053c8f6384c01ebb.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/image_shader.h" #include <algorithm> #include "cupoch/geometry/image.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/utility/range.h" #include "cupoch/utility/platform.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { __device__ uint8_t ConvertColorFromFloatToUnsignedChar(float color) { if (isnan(color)) { return 0; } else { thrust::minimum<float> min; thrust::maximum<float> max; float unified_color = min(1.0f, max(0.0f, color)); return (uint8_t)(unified_color * 255.0f); } } struct copy_float_gray_image_functor { copy_float_gray_image_functor(const uint8_t* gray) : gray_(gray) {}; const uint8_t* gray_; __device__ uint8_t operator() (size_t k) const { int idx = k / 3; float *p = (float *)(gray_ + idx * 4); uint8_t color = ConvertColorFromFloatToUnsignedChar(*p); return color; } }; struct copy_float_rgb_image_functor { copy_float_rgb_image_functor(const uint8_t* rgb) : rgb_(rgb) {}; const uint8_t* rgb_; __device__ uint8_t operator() (size_t idx) const { float *p = (float *)(rgb_ + idx * 4); return ConvertColorFromFloatToUnsignedChar(*p); } }; struct copy_int16_rgb_image_functor { copy_int16_rgb_image_functor(const uint8_t* rgb) : rgb_(rgb) {}; const uint8_t* rgb_; __device__ uint8_t operator() (size_t idx) const { uint16_t *p = (uint16_t *)(rgb_ + idx * 2); return (uint8_t)((*p) & 0xff); } }; struct copy_depth_image_functor { copy_depth_image_functor(const uint8_t* depth, int max_depth) : depth_(depth), max_depth_(max_depth) {}; const uint8_t* depth_; const int max_depth_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ uint8_t operator() (size_t k) const { thrust::minimum<float> min; int i = k / 3; int j = k % 3; uint16_t *p = (uint16_t *)(depth_ + i * 2); float depth = min(float(*p) / float(max_depth_), 1.0); Eigen::Vector3f color = GetColorMapColor(depth, colormap_option_); return (uint8_t)(color(j) * 255); } }; } // unnamed namespace bool ImageShader::Compile() { if (CompileShaders(image_vertex_shader, NULL, image_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_UV_ = glGetAttribLocation(program_, "vertex_UV"); image_texture_ = glGetUniformLocation(program_, "image_texture"); vertex_scale_ = glGetUniformLocation(program_, "vertex_scale"); return true; } void ImageShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool ImageShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_height = GetDataHeight(geometry); const size_t num_data_width = GetDataWidth(geometry); // Create buffers and bind the geometry const GLfloat vertex_position_buffer_data[18] = { -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, }; const GLfloat vertex_UV_buffer_data[12] = { 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, }; glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_position_buffer_data), vertex_position_buffer_data, GL_STATIC_DRAW); glGenBuffers(1, &vertex_UV_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_UV_buffer_data), vertex_UV_buffer_data, GL_STATIC_DRAW); glGenTextures(1, &image_texture_buffer_); glBindTexture(GL_TEXTURE_2D, image_texture_buffer_); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, num_data_width, num_data_height, 0, GL_RGB, GL_UNSIGNED_BYTE, 0); if (option.interpolation_option_ == RenderOption::TextureInterpolationOption::Nearest) { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); } else { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); glGenerateMipmap(GL_TEXTURE_2D); } glGenBuffers(1, &image_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_); size_t data_size = GetDataSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, data_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], image_pixel_buffer_, hipGraphicsMapFlagsNone)); uint8_t* raw_render_image_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(1, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_render_image_ptr, &n_bytes, cuda_graphics_resources_[0])); thrust::device_ptr<uint8_t> dev_render_image_ptr = thrust::device_pointer_cast(raw_render_image_ptr); if (PrepareBinding(geometry, option, view, dev_render_image_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(1); bound_ = true; return true; } bool ImageShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetDataHeight(geometry); const size_t num_data_width = GetDataWidth(geometry); glUseProgram(program_); glUniform3fv(vertex_scale_, 1, vertex_scale_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, image_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, GL_RGB, GL_UNSIGNED_BYTE, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(image_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_UV_); glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_); glVertexAttribPointer(vertex_UV_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); 
glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_UV_); return true; } void ImageShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0])); glDeleteBuffers(1, &image_pixel_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_UV_buffer_); glDeleteTextures(1, &image_texture_buffer_); bound_ = false; } } bool ImageShaderForImage::PrepareRendering(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Image) { PrintShaderWarning("Rendering type is not geometry::Image."); return false; } const geometry::Image &image = (const geometry::Image &)geometry; GLfloat ratio_x, ratio_y; switch (option.image_stretch_option_) { case RenderOption::ImageStretchOption::StretchKeepRatio: ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth()); ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight()); if (ratio_x < ratio_y) { ratio_x /= ratio_y; ratio_y = 1.0f; } else { ratio_y /= ratio_x; ratio_x = 1.0f; } break; case RenderOption::ImageStretchOption::StretchWithWindow: ratio_x = 1.0f; ratio_y = 1.0f; break; case RenderOption::ImageStretchOption::OriginalSize: default: ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth()); ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight()); break; } vertex_scale_data_(0) = ratio_x; vertex_scale_data_(1) = ratio_y; vertex_scale_data_(2) = 1.0f; glDisable(GL_DEPTH_TEST); return true; } bool ImageShaderForImage::PrepareBinding(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<uint8_t> &render_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Image) { PrintShaderWarning("Rendering type is not geometry::Image."); return false; } const geometry::Image &image = (const geometry::Image &)geometry; if (image.HasData() == false) { PrintShaderWarning("Binding failed with empty image."); return false; } if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 1) { thrust::copy(image.data_.begin(), image.data_.end(), render_image); } else { if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 1) { // grayscale image thrust::repeated_range<utility::device_vector<uint8_t>::const_iterator> range(image.data_.begin(), image.data_.end(), 3); thrust::copy(range.begin(), range.end(), render_image); } else if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 4) { // grayscale image with floating point per channel copy_float_gray_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 4) { // RGB image with floating point per channel copy_float_rgb_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 2) { // image with RGB channels, each channel is a 16-bit integer copy_int16_rgb_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 2) { // depth image, one channel of 16-bit integer const int max_depth = option.image_max_depth_; copy_depth_image_functor func(thrust::raw_pointer_cast(image.data_.data()), max_depth); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } } draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = 6; return true; } size_t ImageShaderForImage::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).data_.size(); } size_t ImageShaderForImage::GetDataHeight(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).height_; } size_t ImageShaderForImage::GetDataWidth(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).width_; }
de2a819b0995e9ada7d3fb98053c8f6384c01ebb.cu
#include "cupoch/visualization/shader/image_shader.h" #include <algorithm> #include "cupoch/geometry/image.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/utility/range.h" #include "cupoch/utility/platform.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { __device__ uint8_t ConvertColorFromFloatToUnsignedChar(float color) { if (isnan(color)) { return 0; } else { thrust::minimum<float> min; thrust::maximum<float> max; float unified_color = min(1.0f, max(0.0f, color)); return (uint8_t)(unified_color * 255.0f); } } struct copy_float_gray_image_functor { copy_float_gray_image_functor(const uint8_t* gray) : gray_(gray) {}; const uint8_t* gray_; __device__ uint8_t operator() (size_t k) const { int idx = k / 3; float *p = (float *)(gray_ + idx * 4); uint8_t color = ConvertColorFromFloatToUnsignedChar(*p); return color; } }; struct copy_float_rgb_image_functor { copy_float_rgb_image_functor(const uint8_t* rgb) : rgb_(rgb) {}; const uint8_t* rgb_; __device__ uint8_t operator() (size_t idx) const { float *p = (float *)(rgb_ + idx * 4); return ConvertColorFromFloatToUnsignedChar(*p); } }; struct copy_int16_rgb_image_functor { copy_int16_rgb_image_functor(const uint8_t* rgb) : rgb_(rgb) {}; const uint8_t* rgb_; __device__ uint8_t operator() (size_t idx) const { uint16_t *p = (uint16_t *)(rgb_ + idx * 2); return (uint8_t)((*p) & 0xff); } }; struct copy_depth_image_functor { copy_depth_image_functor(const uint8_t* depth, int max_depth) : depth_(depth), max_depth_(max_depth) {}; const uint8_t* depth_; const int max_depth_; const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption(); __device__ uint8_t operator() (size_t k) const { thrust::minimum<float> min; int i = k / 3; int j = k % 3; uint16_t *p = (uint16_t *)(depth_ + i * 2); float depth = min(float(*p) / float(max_depth_), 1.0); Eigen::Vector3f color = GetColorMapColor(depth, colormap_option_); return (uint8_t)(color(j) * 255); } }; } // unnamed namespace bool ImageShader::Compile() { if (CompileShaders(image_vertex_shader, NULL, image_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_UV_ = glGetAttribLocation(program_, "vertex_UV"); image_texture_ = glGetUniformLocation(program_, "image_texture"); vertex_scale_ = glGetUniformLocation(program_, "vertex_scale"); return true; } void ImageShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool ImageShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_height = GetDataHeight(geometry); const size_t num_data_width = GetDataWidth(geometry); // Create buffers and bind the geometry const GLfloat vertex_position_buffer_data[18] = { -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, }; const GLfloat vertex_UV_buffer_data[12] = { 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, }; glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_position_buffer_data), vertex_position_buffer_data, GL_STATIC_DRAW); glGenBuffers(1, &vertex_UV_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_UV_buffer_data), vertex_UV_buffer_data, GL_STATIC_DRAW); glGenTextures(1, &image_texture_buffer_); glBindTexture(GL_TEXTURE_2D, image_texture_buffer_); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, num_data_width, num_data_height, 0, GL_RGB, GL_UNSIGNED_BYTE, 0); if (option.interpolation_option_ == RenderOption::TextureInterpolationOption::Nearest) { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); } else { glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); glGenerateMipmap(GL_TEXTURE_2D); } glGenBuffers(1, &image_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_); size_t data_size = GetDataSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, data_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], image_pixel_buffer_, cudaGraphicsMapFlagsNone)); uint8_t* raw_render_image_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(1, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_render_image_ptr, &n_bytes, cuda_graphics_resources_[0])); thrust::device_ptr<uint8_t> dev_render_image_ptr = thrust::device_pointer_cast(raw_render_image_ptr); if (PrepareBinding(geometry, option, view, dev_render_image_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(1); bound_ = true; return true; } bool ImageShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetDataHeight(geometry); const size_t num_data_width = GetDataWidth(geometry); glUseProgram(program_); glUniform3fv(vertex_scale_, 1, vertex_scale_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, image_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, GL_RGB, GL_UNSIGNED_BYTE, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(image_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_UV_); glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_); glVertexAttribPointer(vertex_UV_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, 
draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_UV_); return true; } void ImageShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0])); glDeleteBuffers(1, &image_pixel_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_UV_buffer_); glDeleteTextures(1, &image_texture_buffer_); bound_ = false; } } bool ImageShaderForImage::PrepareRendering(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Image) { PrintShaderWarning("Rendering type is not geometry::Image."); return false; } const geometry::Image &image = (const geometry::Image &)geometry; GLfloat ratio_x, ratio_y; switch (option.image_stretch_option_) { case RenderOption::ImageStretchOption::StretchKeepRatio: ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth()); ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight()); if (ratio_x < ratio_y) { ratio_x /= ratio_y; ratio_y = 1.0f; } else { ratio_y /= ratio_x; ratio_x = 1.0f; } break; case RenderOption::ImageStretchOption::StretchWithWindow: ratio_x = 1.0f; ratio_y = 1.0f; break; case RenderOption::ImageStretchOption::OriginalSize: default: ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth()); ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight()); break; } vertex_scale_data_(0) = ratio_x; vertex_scale_data_(1) = ratio_y; vertex_scale_data_(2) = 1.0f; glDisable(GL_DEPTH_TEST); return true; } bool ImageShaderForImage::PrepareBinding(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<uint8_t> &render_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Image) { PrintShaderWarning("Rendering type is not geometry::Image."); return false; } const geometry::Image &image = (const geometry::Image &)geometry; if (image.HasData() == false) { PrintShaderWarning("Binding failed with empty image."); return false; } if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 1) { thrust::copy(image.data_.begin(), image.data_.end(), render_image); } else { if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 1) { // grayscale image thrust::repeated_range<utility::device_vector<uint8_t>::const_iterator> range(image.data_.begin(), image.data_.end(), 3); thrust::copy(range.begin(), range.end(), render_image); } else if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 4) { // grayscale image with floating point per channel copy_float_gray_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 4) { // RGB image with floating point per channel copy_float_rgb_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 2) { // image with RGB channels, each channel is a 16-bit integer copy_int16_rgb_image_functor func(thrust::raw_pointer_cast(image.data_.data())); thrust::transform(thrust::make_counting_iterator<size_t>(0), 
thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } else if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 2) { // depth image, one channel of 16-bit integer const int max_depth = option.image_max_depth_; copy_depth_image_functor func(thrust::raw_pointer_cast(image.data_.data()), max_depth); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(image.height_ * image.width_ * 3), render_image, func); } } draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = 6; return true; } size_t ImageShaderForImage::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).data_.size(); } size_t ImageShaderForImage::GetDataHeight(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).height_; } size_t ImageShaderForImage::GetDataWidth(const geometry::Geometry &geometry) const { return ((const geometry::Image &)geometry).width_; }
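The PrepareBinding() branches above all follow the same thrust pattern: a counting iterator enumerates output bytes and a functor computes each one from the source image. A minimal self-contained sketch of that pattern (toy functor, hypothetical names) looks like this:

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdint>

// Toy stand-in for the image functors above: the value depends only on the index.
struct fill_gradient_functor {
    __device__ uint8_t operator()(size_t k) const { return (uint8_t)(k % 256); }
};

void fill_render_buffer(thrust::device_vector<uint8_t> &render_image)
{
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator<size_t>(render_image.size()),
                      render_image.begin(),
                      fill_gradient_functor());
}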
f267e1462000499e925a24329b81e6437985cc5a.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <iomanip> #include <iostream> #include "gpu.cuh" template <typename Dtype> void print(const thrust::device_vector<Dtype> &v) { for (size_t i = 0; i < v.size(); i++) std::cout << " " << std::fixed << std::setprecision(3) << v[i]; std::cout << "\n"; } template void print(const thrust::device_vector<float> &v); template void print(const thrust::device_vector<int32_t> &v); template <typename Dtype1, typename Dtype2> void print(const thrust::device_vector<Dtype1> &v1, const thrust::device_vector<Dtype2> &v2) { for (size_t i = 0; i < v1.size(); i++) std::cout << " (" << v1[i] << "," << std::setw(2) << v2[i] << ")"; std::cout << "\n"; } template void print(const thrust::device_vector<int32_t> &v1, const thrust::device_vector<int32_t> &v2); const char* cublasGetErrorString(hipblasStatus_t error) { switch (error) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; #if TORCH_HIP_VERSION >= 6000 case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; #endif #if TORCH_HIP_VERSION >= 6050 case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; #endif } return "Unknown cublas status"; } const char* hipsparseGetErrorString(hipsparseStatus_t error) { // Read more at: http://docs.nvidia.com/cuda/cusparse/index.html#ixzz3f79JxRar switch (error) { case HIPSPARSE_STATUS_SUCCESS: return "The operation completed successfully."; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED"; case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; }
f267e1462000499e925a24329b81e6437985cc5a.cu
#include <cstdio> #include <iomanip> #include <iostream> #include "gpu.cuh" template <typename Dtype> void print(const thrust::device_vector<Dtype> &v) { for (size_t i = 0; i < v.size(); i++) std::cout << " " << std::fixed << std::setprecision(3) << v[i]; std::cout << "\n"; } template void print(const thrust::device_vector<float> &v); template void print(const thrust::device_vector<int32_t> &v); template <typename Dtype1, typename Dtype2> void print(const thrust::device_vector<Dtype1> &v1, const thrust::device_vector<Dtype2> &v2) { for (size_t i = 0; i < v1.size(); i++) std::cout << " (" << v1[i] << "," << std::setw(2) << v2[i] << ")"; std::cout << "\n"; } template void print(const thrust::device_vector<int32_t> &v1, const thrust::device_vector<int32_t> &v2); const char* cublasGetErrorString(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; #if CUDA_VERSION >= 6000 case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; #endif #if CUDA_VERSION >= 6050 case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; #endif } return "Unknown cublas status"; } const char* cusparseGetErrorString(cusparseStatus_t error) { // Read more at: http://docs.nvidia.com/cuda/cusparse/index.html#ixzz3f79JxRar switch (error) { case CUSPARSE_STATUS_SUCCESS: return "The operation completed successfully."; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; }
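The error-string helpers above are typically wrapped in a checking macro. The sketch below is an assumed convenience (CUBLAS_CHECK is not defined anywhere in these files); it only relies on the cublasGetErrorString() shown above.

#include <cstdio>
#include <cstdlib>
#include <cublas_v2.h>

// Hypothetical helper macro built on cublasGetErrorString() above.
#define CUBLAS_CHECK(call)                                                  \
  do {                                                                      \
    cublasStatus_t status_ = (call);                                        \
    if (status_ != CUBLAS_STATUS_SUCCESS) {                                 \
      fprintf(stderr, "cuBLAS error %s at %s:%d\n",                         \
              cublasGetErrorString(status_), __FILE__, __LINE__);           \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

// Usage: CUBLAS_CHECK(cublasCreate(&handle));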
2899418c138624730d6682abcc14c115e542cb68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -*- Mode: C++ ; c-file-style:"stroustrup"; indent-tabs-mode:nil; -*- #include <stdio.h> #include <stdlib.h> #include "Model.cu" #define DTYPE @DataType@ __shared__ DTYPE * inputPtr; // Macro to get data value. #define get(xarg,yarg)(GetInputValue(input_size, inputPtr, \ ((int)((blockDim.x * blockIdx.x) + threadIdx.x)) + xarg, \ ((int)((blockDim.y * blockIdx.y) + threadIdx.y)) + yarg)) #define getNew(xarg,yarg)(GetInputValue(input_size, output, \ ((int)((blockDim.x * blockIdx.x) + threadIdx.x)) + xarg, \ ((int)((blockDim.y * blockIdx.y) + threadIdx.y)) + yarg)) // Macro to read global read only data from within CellValue code. #define read(offset)(ro_data[offset]) #if EdgeValue __device__ DTYPE EdgeValue(dim3 input_size, int x, int y, DTYPE value) { @EdgeValue@ } #endif #if ConvergeValue __device__ int converge_value; __device__ int ConvergeValue(dim3 input_size, int x, int y, DTYPE *output @ConvergeScalarVariables@) { @ConvergeValue@ } #endif // Take care to only read global memory is no EdgeValue is specified. __device__ DTYPE GetInputValue(dim3 input_size, DTYPE *data, int x, int y) { int ex, ey, inside; ex = x; ey = y; if (ex < 0) ex = 0; if (ey < 0) ey = 0; if (ex >= input_size.x) ex = input_size.x-1; if (ey >= input_size.y) ey = input_size.y-1; inside = ((x == ex) && (y == ey)); if (inside) return data[y * input_size.x + x]; DTYPE value = data[ey * input_size.x + ex]; #if EdgeValue if (!inside) { value = EdgeValue(input_size, x, y, value @ScalarVariableNames@); } #endif return value; } __device__ DTYPE CellValue(dim3 input_size, int iteration, int x, int y, DTYPE *ro_data @ScalarVariables@) { @CellValue@ } /** * Each thread runs this kernel to calculate the value at one particular * cell in one particular iteration. */ // We need to declare it C style naming. // This avoids name mangling and allows us to get attributes about the kernel call from Cuda. // Its possible to do this with a C++ interface, but that will only run on certain devices. // This technique is older and therefore more reliable across Cuda devices. extern "C" { void @FunctionName@Kernel(dim3 input_size, int iter, DTYPE *input, DTYPE *output, DTYPE *ro_data @ScalarVariables@ @ConvergeScalarVariables@); } __global__ void @FunctionName@Kernel(dim3 input_size, int iter, DTYPE *input, DTYPE *output, DTYPE *ro_data @ScalarVariables@ @ConvergeScalarVariables@) { if (threadIdx.x == 0 && threadIdx.y == 0) inputPtr = input; __syncthreads(); int bx, by, x, y; DTYPE value; #if ConvergeValue do { int converge_value_result; #endif // (bx, by) is the location in the input of the top left of this block. bx = blockIdx.x * blockDim.x; by = blockIdx.y * blockDim.y; // x is the location in the input of this thread. x = bx + threadIdx.x; y = by + threadIdx.y; // Make sure we are not output the data size. if (x >= input_size.x || y >= input_size.y) return; value = CellValue(input_size, iter, x, y, ro_data @ScalarVariableNames@); output[y * input_size.x + x] = value; #if ConvergeValue converge_value = @ConvergeType@; __syncthreads(); converge_value_result = ConvergeValue(input_size, x, y, output @ConvergeScalarVariableNames@); if (@ConvergeType@) { if (!converge_value_result) { converge_value = converge_value_result; } } else { if (converge_value_result) { converge_value = converge_value_result; } } __syncthreads(); } while (!converge_value); #endif } /** * Store data between calls to SetData() and run(). * This is basically a hack. 
*/ static DTYPE *global_ro_data = NULL; /** * Function exported to do the entire stencil computation. */ void @FunctionName@(DTYPE *host_data, int x_max, int y_max, int iterations @ScalarVariables@ @ConvergeScalarVariables@) { // User-specific parameters dim3 input_size(x_max, y_max); dim3 stencil_size@StencilSize@; // Host to device DTYPE *device_input, *device_output; int num_bytes = input_size.x * input_size.y * sizeof(DTYPE); hipMalloc((void **) &device_input, num_bytes); hipMalloc((void **) &device_output, num_bytes); hipMemcpy(device_input, host_data, num_bytes, hipMemcpyHostToDevice); // Setup the structure that holds parameters for the application. // And from that, get the block size. char * KernelName = "@FunctionName@Kernel"; dim3 tile_size = initSAProps(@NumDimensions@, input_size, stencil_size, iterations, sizeof(DTYPE), KernelName); dim3 grid_dims; filldim3(&grid_dims, div_ceil(input_size.x, tile_size.x), div_ceil(input_size.y, tile_size.y)); fprintf(stderr, "Grid dimensions are: x=%d, y=%d\n", grid_dims.x, grid_dims.y); fprintf(stderr, "Tile dimensions are: x=%d, y=%d\n", tile_size.x, tile_size.y); ///////////////////////////////////////////////////////////////////////////////////// // Start of code to gather statistics to hone model. Remove in final version. //////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// // End of code to gather statistics to hone model. Remove in final version. //////////////////////////////////////////////////////////////////////////////////// #ifdef STATISTICS fprintf(stderr, "***********************************Start of a new Run****************************************\n"); fprintf(stderr, "Data Size=%d, Tile Size=%d Iteration Count=%d\n", input_size.x, tile_size.x, iterations); struct timeval starttime, endtime; unsigned int usec2; gettimeofday(&starttime, NULL); #endif // Run computation for (int iter = 1; iter <= iterations; iter += 1) { hipLaunchKernelGGL(@FunctionName@Kernel, dim3(grid_dims), dim3(tile_size), 0, 0, input_size, iter, device_input, device_output, global_ro_data @ScalarVariableNames@ @ConvergeScalarVariableNames@); #ifdef STATISTICS /// TEMPORARY HACK to get some debug info to make sure the kernel call succeeds. /// Should we leave it in, or take it out? hipError_t foo = hipGetLastError(); if (foo != hipSuccess) fprintf(stderr, "%s\n", hipGetErrorString(foo)); #endif DTYPE *temp = device_input; device_input = device_output; device_output = temp; } #ifdef STATISTICS // Synch the threads to make sure everything is done before taking a timing. CUDA_SAFE_THREAD_SYNC(); gettimeofday(&endtime, NULL); usec2 = ((endtime.tv_sec - starttime.tv_sec) * 1000000 + (endtime.tv_usec - starttime.tv_usec)); fprintf(stderr, "Actual total time=%u\n", usec2); #endif // Device to host hipMemcpy(host_data, device_input, num_bytes, hipMemcpyDeviceToHost); hipFree(device_input); hipFree(device_output); if (global_ro_data != NULL) { hipFree(global_ro_data); global_ro_data = NULL; } } /** * Store unnamed data on device. */ void @FunctionName@SetData(DTYPE *host_data, int num_elements) { // TEMPORARY. // If we want to set the cuda device number, it must be here before we call any other cuda functions. // hipSetDevice(1); int num_bytes = sizeof(DTYPE) * num_elements; hipMalloc((void **) &global_ro_data, num_bytes); hipMemcpy(global_ro_data, host_data, num_bytes, hipMemcpyHostToDevice); }
2899418c138624730d6682abcc14c115e542cb68.cu
// -*- Mode: C++ ; c-file-style:"stroustrup"; indent-tabs-mode:nil; -*- #include <stdio.h> #include <stdlib.h> #include "Model.cu" #define DTYPE @DataType@ __shared__ DTYPE * inputPtr; // Macro to get data value. #define get(xarg,yarg)(GetInputValue(input_size, inputPtr, \ ((int)((blockDim.x * blockIdx.x) + threadIdx.x)) + xarg, \ ((int)((blockDim.y * blockIdx.y) + threadIdx.y)) + yarg)) #define getNew(xarg,yarg)(GetInputValue(input_size, output, \ ((int)((blockDim.x * blockIdx.x) + threadIdx.x)) + xarg, \ ((int)((blockDim.y * blockIdx.y) + threadIdx.y)) + yarg)) // Macro to read global read only data from within CellValue code. #define read(offset)(ro_data[offset]) #if EdgeValue __device__ DTYPE EdgeValue(dim3 input_size, int x, int y, DTYPE value) { @EdgeValue@ } #endif #if ConvergeValue __device__ int converge_value; __device__ int ConvergeValue(dim3 input_size, int x, int y, DTYPE *output @ConvergeScalarVariables@) { @ConvergeValue@ } #endif // Take care to only read global memory is no EdgeValue is specified. __device__ DTYPE GetInputValue(dim3 input_size, DTYPE *data, int x, int y) { int ex, ey, inside; ex = x; ey = y; if (ex < 0) ex = 0; if (ey < 0) ey = 0; if (ex >= input_size.x) ex = input_size.x-1; if (ey >= input_size.y) ey = input_size.y-1; inside = ((x == ex) && (y == ey)); if (inside) return data[y * input_size.x + x]; DTYPE value = data[ey * input_size.x + ex]; #if EdgeValue if (!inside) { value = EdgeValue(input_size, x, y, value @ScalarVariableNames@); } #endif return value; } __device__ DTYPE CellValue(dim3 input_size, int iteration, int x, int y, DTYPE *ro_data @ScalarVariables@) { @CellValue@ } /** * Each thread runs this kernel to calculate the value at one particular * cell in one particular iteration. */ // We need to declare it C style naming. // This avoids name mangling and allows us to get attributes about the kernel call from Cuda. // Its possible to do this with a C++ interface, but that will only run on certain devices. // This technique is older and therefore more reliable across Cuda devices. extern "C" { void @FunctionName@Kernel(dim3 input_size, int iter, DTYPE *input, DTYPE *output, DTYPE *ro_data @ScalarVariables@ @ConvergeScalarVariables@); } __global__ void @FunctionName@Kernel(dim3 input_size, int iter, DTYPE *input, DTYPE *output, DTYPE *ro_data @ScalarVariables@ @ConvergeScalarVariables@) { if (threadIdx.x == 0 && threadIdx.y == 0) inputPtr = input; __syncthreads(); int bx, by, x, y; DTYPE value; #if ConvergeValue do { int converge_value_result; #endif // (bx, by) is the location in the input of the top left of this block. bx = blockIdx.x * blockDim.x; by = blockIdx.y * blockDim.y; // x is the location in the input of this thread. x = bx + threadIdx.x; y = by + threadIdx.y; // Make sure we are not output the data size. if (x >= input_size.x || y >= input_size.y) return; value = CellValue(input_size, iter, x, y, ro_data @ScalarVariableNames@); output[y * input_size.x + x] = value; #if ConvergeValue converge_value = @ConvergeType@; __syncthreads(); converge_value_result = ConvergeValue(input_size, x, y, output @ConvergeScalarVariableNames@); if (@ConvergeType@) { if (!converge_value_result) { converge_value = converge_value_result; } } else { if (converge_value_result) { converge_value = converge_value_result; } } __syncthreads(); } while (!converge_value); #endif } /** * Store data between calls to SetData() and run(). * This is basically a hack. */ static DTYPE *global_ro_data = NULL; /** * Function exported to do the entire stencil computation. 
*/ void @FunctionName@(DTYPE *host_data, int x_max, int y_max, int iterations @ScalarVariables@ @ConvergeScalarVariables@) { // User-specific parameters dim3 input_size(x_max, y_max); dim3 stencil_size@StencilSize@; // Host to device DTYPE *device_input, *device_output; int num_bytes = input_size.x * input_size.y * sizeof(DTYPE); cudaMalloc((void **) &device_input, num_bytes); cudaMalloc((void **) &device_output, num_bytes); cudaMemcpy(device_input, host_data, num_bytes, cudaMemcpyHostToDevice); // Setup the structure that holds parameters for the application. // And from that, get the block size. char * KernelName = "@FunctionName@Kernel"; dim3 tile_size = initSAProps(@NumDimensions@, input_size, stencil_size, iterations, sizeof(DTYPE), KernelName); dim3 grid_dims; filldim3(&grid_dims, div_ceil(input_size.x, tile_size.x), div_ceil(input_size.y, tile_size.y)); fprintf(stderr, "Grid dimensions are: x=%d, y=%d\n", grid_dims.x, grid_dims.y); fprintf(stderr, "Tile dimensions are: x=%d, y=%d\n", tile_size.x, tile_size.y); ///////////////////////////////////////////////////////////////////////////////////// // Start of code to gather statistics to hone model. Remove in final version. //////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// // End of code to gather statistics to hone model. Remove in final version. //////////////////////////////////////////////////////////////////////////////////// #ifdef STATISTICS fprintf(stderr, "***********************************Start of a new Run****************************************\n"); fprintf(stderr, "Data Size=%d, Tile Size=%d Iteration Count=%d\n", input_size.x, tile_size.x, iterations); struct timeval starttime, endtime; unsigned int usec2; gettimeofday(&starttime, NULL); #endif // Run computation for (int iter = 1; iter <= iterations; iter += 1) { @FunctionName@Kernel<<< grid_dims, tile_size >>>( input_size, iter, device_input, device_output, global_ro_data @ScalarVariableNames@ @ConvergeScalarVariableNames@); #ifdef STATISTICS /// TEMPORARY HACK to get some debug info to make sure the kernel call succeeds. /// Should we leave it in, or take it out? cudaError_t foo = cudaGetLastError(); if (foo != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(foo)); #endif DTYPE *temp = device_input; device_input = device_output; device_output = temp; } #ifdef STATISTICS // Synch the threads to make sure everything is done before taking a timing. CUDA_SAFE_THREAD_SYNC(); gettimeofday(&endtime, NULL); usec2 = ((endtime.tv_sec - starttime.tv_sec) * 1000000 + (endtime.tv_usec - starttime.tv_usec)); fprintf(stderr, "Actual total time=%u\n", usec2); #endif // Device to host cudaMemcpy(host_data, device_input, num_bytes, cudaMemcpyDeviceToHost); cudaFree(device_input); cudaFree(device_output); if (global_ro_data != NULL) { cudaFree(global_ro_data); global_ro_data = NULL; } } /** * Store unnamed data on device. */ void @FunctionName@SetData(DTYPE *host_data, int num_elements) { // TEMPORARY. // If we want to set the cuda device number, it must be here before we call any other cuda functions. // cudaSetDevice(1); int num_bytes = sizeof(DTYPE) * num_elements; cudaMalloc((void **) &global_ro_data, num_bytes); cudaMemcpy(global_ro_data, host_data, num_bytes, cudaMemcpyHostToDevice); }
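The grid sizing in both versions of this stencil template depends on div_ceil() and filldim3(), which come from Model.cu and are not shown here. As a hedged illustration of the intended behaviour only:

// Illustrative ceiling division; div_ceil_sketch is a hypothetical name.
static inline int div_ceil_sketch(int num, int denom)
{
    return (num + denom - 1) / denom;  // round up so a partial tile still gets a block
}
// e.g. div_ceil_sketch(1000, 256) == 4, so a 1000-wide input is covered by 4 blocks of 256.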
236f240514aa23186789321ea693d8ee988e2c1b.hip
// !!! This is a file automatically generated by hipify!!! // CUDA Device Query #include <stdio.h> // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
236f240514aa23186789321ea693d8ee988e2c1b.cu
// CUDA Device Query #include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %u\n", devProp.totalGlobalMem); printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
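The device-query sample above ignores the return codes of the runtime calls. A small checked variant, added only as an illustration, could look like this:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: returns 0 and prints a message if the query fails.
int checkedDeviceCount()
{
    int devCount = 0;
    cudaError_t err = cudaGetDeviceCount(&devCount);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 0;
    }
    return devCount;
}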
e9fe3c71e34292251d0d502be623406acfa26df9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. // //////////////////////////////////////////////////////////////////////////////// #include "lbann/execution_algorithms/kfac/kfac_block_bn.hpp" namespace lbann { namespace { template <typename TensorDataType> __global__ void kfac_compute_bn_factor_data2col_kernel( const TensorDataType * __restrict__ activations, const TensorDataType * __restrict__ errors, const TensorDataType * __restrict__ scales, const TensorDataType * __restrict__ biases, TensorDataType * __restrict__ cols, const size_t batch_size, const size_t num_channels, const size_t spatial_prod, const size_t num_threads) { // = batch_size*num_channels*spatial_prod const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid < num_threads) { const size_t i_c = gid%num_channels; const size_t i_n = (gid/num_channels)%batch_size; const size_t i_s = gid/num_channels/batch_size; const auto scale = scales[i_c]; const auto bias = biases[i_c]; const auto i_act = i_s+i_c*spatial_prod+i_n*spatial_prod*num_channels; const auto error = errors[i_act]; const auto act = (activations[i_act]-bias)/scale; const auto i_out = i_c+i_n*num_channels*2 + i_s*(num_channels*2*batch_size); cols[i_out] = error*act; cols[i_out+num_channels] = error; } } } // namespace template <> void kfac_bn_util::compute_bn_factor_data2col( const El::Matrix<DataType, El::Device::GPU>& activations, const El::Matrix<DataType, El::Device::GPU>& errors, const El::Matrix<DataType, El::Device::GPU>& scales, const El::Matrix<DataType, El::Device::GPU>& biases, El::Matrix<DataType, El::Device::GPU>& cols, const size_t batch_size, const size_t num_channels, const size_t spatial_prod, const El::SyncInfo<El::Device::GPU>& sync_info) { constexpr size_t block_size = 256; const size_t num_threads = batch_size * num_channels * spatial_prod; const size_t grid_size = (num_threads + block_size - 1) / block_size; if (grid_size > 0) { hydrogen::gpu::LaunchKernel( kfac_compute_bn_factor_data2col_kernel<DataType>, grid_size, block_size, 0, sync_info, activations.LockedBuffer(), errors.LockedBuffer(), scales.LockedBuffer(), biases.LockedBuffer(), cols.Buffer(), batch_size, num_channels, spatial_prod, num_threads); } } } // namespace lbann
e9fe3c71e34292251d0d502be623406acfa26df9.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. // //////////////////////////////////////////////////////////////////////////////// #include "lbann/execution_algorithms/kfac/kfac_block_bn.hpp" namespace lbann { namespace { template <typename TensorDataType> __global__ void kfac_compute_bn_factor_data2col_kernel( const TensorDataType * __restrict__ activations, const TensorDataType * __restrict__ errors, const TensorDataType * __restrict__ scales, const TensorDataType * __restrict__ biases, TensorDataType * __restrict__ cols, const size_t batch_size, const size_t num_channels, const size_t spatial_prod, const size_t num_threads) { // = batch_size*num_channels*spatial_prod const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid < num_threads) { const size_t i_c = gid%num_channels; const size_t i_n = (gid/num_channels)%batch_size; const size_t i_s = gid/num_channels/batch_size; const auto scale = scales[i_c]; const auto bias = biases[i_c]; const auto i_act = i_s+i_c*spatial_prod+i_n*spatial_prod*num_channels; const auto error = errors[i_act]; const auto act = (activations[i_act]-bias)/scale; const auto i_out = i_c+i_n*num_channels*2 + i_s*(num_channels*2*batch_size); cols[i_out] = error*act; cols[i_out+num_channels] = error; } } } // namespace template <> void kfac_bn_util::compute_bn_factor_data2col( const El::Matrix<DataType, El::Device::GPU>& activations, const El::Matrix<DataType, El::Device::GPU>& errors, const El::Matrix<DataType, El::Device::GPU>& scales, const El::Matrix<DataType, El::Device::GPU>& biases, El::Matrix<DataType, El::Device::GPU>& cols, const size_t batch_size, const size_t num_channels, const size_t spatial_prod, const El::SyncInfo<El::Device::GPU>& sync_info) { constexpr size_t block_size = 256; const size_t num_threads = batch_size * num_channels * spatial_prod; const size_t grid_size = (num_threads + block_size - 1) / block_size; if (grid_size > 0) { hydrogen::gpu::LaunchKernel( kfac_compute_bn_factor_data2col_kernel<DataType>, grid_size, block_size, 0, sync_info, activations.LockedBuffer(), errors.LockedBuffer(), scales.LockedBuffer(), biases.LockedBuffer(), cols.Buffer(), batch_size, num_channels, spatial_prod, num_threads); } } } // namespace lbann
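The kernel above flattens (sample, channel, spatial position) into a single thread id. A host-side mirror of that decomposition, added as a sketch for reasoning about the layout (the struct and function names are hypothetical):

#include <cstddef>

struct BnFactorIndex { size_t channel, sample, spatial; };

// Mirrors the kernel's i_c / i_n / i_s computation for a given gid.
inline BnFactorIndex decompose_gid(size_t gid, size_t batch_size, size_t num_channels)
{
    BnFactorIndex idx;
    idx.channel = gid % num_channels;                 // i_c
    idx.sample  = (gid / num_channels) % batch_size;  // i_n
    idx.spatial = gid / num_channels / batch_size;    // i_s
    return idx;
}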
a7107149d9da26cc8226ad6766820116b3d99f45.hip
// !!! This is a file automatically generated by hipify!!! // C++ #include <iostream> #include <string> #include <random> // C #include <stdlib.h> #include <stdio.h> #include <math.h> #include <limits.h> // CUDA #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime_api.h> // Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC) #pragma warning(push,4) #define DEBUG 0 #define MICRO 0 #define MACRO 0 #define QUEUE 0 #if QUEUE const int MaxBlocks = 10000; const int SliceSize = 100; #endif /******************************************************************************/ /*** 2-opt with random restarts ***********************************************/ /******************************************************************************/ // Euclidean distance #define dist(a, b) (sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) //#define dist(a, b) __float2int_rn(sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) #define swap(a, b) {float tmp = a; a = b; b = tmp;} static __device__ int climbs_d = 0; static __device__ int best_d = INT_MAX; #if QUEUE static __device__ int restart_d = 0; #endif // Buffer space, used for cache and maximum propagation #if DEBUG #if MICRO static __device__ unsigned long long int d_lDuration = 0; static __device__ unsigned long long int d_cDuration = 0; static __device__ unsigned long long int d_pDuration = 0; static __device__ long long int load_duration[128] = {0}; static __device__ long long int compute_duration[128] = {0}; static __device__ long long int propagate_duration[128] = {0}; #endif #if MACRO static __device__ unsigned long long int d_uDuration = 0; static __device__ unsigned long long int d_sDuration = 0; static __device__ long long int update_duration[128] = {0}; static __device__ long long int single_duration[128] = {0}; #endif #endif // Instrumentation #define LOG( X ) { if( DEBUG ) {X();} } // Load static __device__ void inline load_start() { #if MICRO if(threadIdx.x == 0) {load_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline load_end() { #if MICRO if(threadIdx.x == 0) {load_duration[blockIdx.x] += clock64();} #endif } // Compute static __device__ void inline compute_start() { #if MICRO if(threadIdx.x == 0) {compute_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline compute_end() { #if MICRO if(threadIdx.x == 0) {compute_duration[blockIdx.x] += clock64();} #endif } // Compute static __device__ void inline propagate_start() { #if MICRO if(threadIdx.x == 0) {propagate_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline propagate_end() { #if sMICRO if(threadIdx.x == 0) {propagate_duration[blockIdx.x] += clock64();} #endif } // Single_iter static __device__ void inline single_start() { #if MACRO if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline single_end() { #if MACRO if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] += clock64();} #endif } // Update static __device__ void inline update_start() { #if MACRO if(threadIdx.x == 0) {update_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline update_end() { #if MACRO if(threadIdx.x == 0) {update_duration[blockIdx.x] += clock64();} #endif } enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS}; // Data structure used to hold 
position along path struct __align__(8) Data { float x,y; }; #if QUEUE // // Returns a unique integer value with the initial value being 0 // // Synchronizes so not safe for branches // // @return - Returns the next unique int // static __device__ inline int nextSlice(int* __restrict__ w_buffer) { if(threadIdx.x==0) { w_buffer[0] = atomicAdd(&restart_d, SliceSize); }__syncthreads(); return w_buffer[0]; } #endif // Allocates and initializes my global memory and shared memory. // // @pos - An array that need to be initialized and will hold our path points // @weight - An array that need to be initialized and will hold our edge weights // @cities - The amount of points in our graph // // @return - Returns true if initialization was successful, false otherwise. // template <int TileSize> static inline __device__ bool initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int &cities) { // Shared memory is required to share the allocated memory __shared__ Data *d; __shared__ int *w; if(threadIdx.x == 0 ) { d = new Data[cities + 1]; if( d != NULL ) { w = new int[cities]; if( w == NULL ) { delete[] d; d = NULL; } } }__syncthreads(); if(d == NULL) { return false; } // Save new memory locations pos = d; weight = w; for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i]; __syncthreads(); return true; } // // Each thread gives some integer value, then the maximum of them is returned. // // @t_val - The number that the thread submits as a candidate for the maximum value // @cities - The number of cities. // // @return - The maximum value of t_val seen from all threads template <ThreadBufferStatus Status, int TileSize> static inline __device__ int maximum(int t_val, const int &cities, int* __restrict__ &w_buffer) { LOG( propagate_start ); int upper = min(blockDim.x,min(TileSize,cities)); // We have to condense things down if(Status == MORE_THREADS_THAN_BUFFER) { // Compute your index and then try to shove what you have in the buffer const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); // Now try to win (someone will win) for(int i = 0 ; i <= (blockDim.x /TileSize); ++i ) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else { // Otherwise we have more than enough room! w_buffer[threadIdx.x] = t_val; }__syncthreads(); #pragma unroll 4 for( int i = 512; i > 32 ; i /= 2 ) { if (TileSize > i && blockDim.x > i) { int offset = (upper + 1) / 2; if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } } // 64 and down if(threadIdx.x < 32) { // Yes. upper = 32. w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16] if(TileSize > 32 && blockDim.x > 32) { int tmp = w_buffer[threadIdx.x + (upper+1)/2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } for( int i = 16; i > 0; i = i / 2 ) { if(threadIdx.x < i) { int tmp = w_buffer[threadIdx.x + i]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } } }__syncthreads(); LOG( propagate_end ); return w_buffer[0]; } // // After we find the best four position to reconnect with all we need to // reverse the path between them. // // @start - The first position in the sub-path we have to swap with the end // @end - The last position in the path we have to swap with the start // @pos - The positions in our path // @weights - The edge weights between points // // TODO: Is it better to reverse the weight or just recompute it? 
// static inline __device__ void reverse(int start, int end, Data* &pos, int* &weight) { while(start<end) { int w = weight[start]; Data d = pos[start]; weight[start] = weight[end-1]; pos[start] = pos[end]; weight[end-1] = w; pos[end] = d; start += blockDim.x; end -= blockDim.x; }__syncthreads(); } // // Perform a single iteration of Two-Opt // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <int TileSize> static __device__ void singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, int* __restrict__ w_buffer) { LOG( single_start ); // // The tour is divided into segments. Each segment has a length of // the number of threads, except possibly the last one. // // We traverse through the segments. When we are in a segment each // city in the segment of the tour is given to a thread. Then we // begin scanning each city from the end of the tour until we reach // the current city. Later threads will terminate this process earlier // than earlier threads. // // During each scan we will evaluate if it is better to reverse the path // between the two cities. If so we check to see if that is better than // any other possible reversal we have seen. // // After we have done this for all segments then we call update. Update // make some modification to the tour given the set of best reversals // seen by each thread. // // for (int leading = 0; leading < cities - 2; leading += blockDim.x) { int i = leading + threadIdx.x; float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1; if (i < cities - 2) { minchange -= weight[i]; pxi0 = pos[i].x; pyi0 = pos[i].y; pxi1 = pos[i+1].x; pyi1 = pos[i+1].y; pxj1 = pos[0].x; pyj1 = pos[0].y; } for (int trailing = cities - 1; trailing >= leading + 2; trailing -= TileSize) { int bound = trailing - TileSize + 1; // The lower bound on what we can load // // Load the shared memory cache // // Each thread will try to load adjacent elements // LOG( load_start ); for(int k = threadIdx.x; k < TileSize; k += blockDim.x) { int cache_idx = k + bound; if (cache_idx >= (leading + 2)) { // Never go below the lowest city x_buffer[k] = pos[cache_idx].x; y_buffer[k] = pos[cache_idx].y; w_buffer[k] = weight[cache_idx]; } }__syncthreads(); LOG( load_end ); LOG( compute_start ); // Compute the lower bound that we can see int lower = bound; if (lower < i + 2) lower = i + 2; // Go over loaded cache that everyone will use for (int current = trailing; current >= lower; current--) { int cache_idx = current - bound; float pxj0 = x_buffer[cache_idx]; float pyj0 = y_buffer[cache_idx]; int change = w_buffer[cache_idx] + __float2int_rn(sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0))) + __float2int_rn(sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1))); // Shift down pxj1 = pxj0; pyj1 = pyj0; // If better save it and where we found it if (minchange > change) { minchange = change; mini = i; minj = current; } }__syncthreads(); LOG( compute_end ); } if (i < cities - 2) { minchange += weight[i]; } } LOG( single_end ); } // // Perform the swaps to the edges i and j to decrease the total length of our // path and update the weight and pos arrays appropriately. 
// // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <ThreadBufferStatus Status, int TileSize> static __device__ bool update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, int* __restrict__ w_buffer) { LOG( update_start ); // For all threads, find the best change maximum<Status,TileSize>(minchange, cities, w_buffer); // If we don't have one, oh well. if(w_buffer[0] >= 0) { LOG( update_end ); return false; } // While we have an update while(w_buffer[0] < 0 ) { // If we have multiple bests, pick one if (minchange == w_buffer[0]) { w_buffer[1] = threadIdx.x; }__syncthreads(); // Get what which indices to swap if(threadIdx.x==w_buffer[1]) { w_buffer[2] = mini; w_buffer[3] = minj; }__syncthreads(); // Give them to each thread int mi = w_buffer[2]; int mj = w_buffer[3]; // If we are overlapping the best then we can't be used if(!(minj < (mi - 1)) && !(mini > (mj + 1))) { minchange = 0; } // Reverse the path between the nodes selected reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight); // Fix connecting edges weights for the endpoints weight[mi] = -dist(mi,mi+1); weight[mj] = -dist(mj,mj+1); __syncthreads(); // Wait for everyone // Get the next best maximum<Status,TileSize>(minchange, cities, w_buffer); } LOG( update_end ); return true; } // // Given a path we randomly permute it into a new new path and then initialize the weights of the path. // // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @cities - The number of cities along the path (excluding the end point) static __device__ inline void permute(Data* &pos, int* &weight, const int &cities, hiprandState_t &rndstate) { if (threadIdx.x == 0) { // serial permutation for (int i = 1; i < cities; i++) { int j = hiprand(&rndstate) % (cities - 1) + 1; Data d = pos[i]; pos[i] = pos[j]; pos[j] = d; } pos[cities] = pos[0]; }__syncthreads(); for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1); __syncthreads(); } // // Releases memory and saves results // // @pos - Pointer to allocated path memory // @weight - Pointer to allocated edge weight memory // @local_climbs - The number of climbs performed by this block // @best_length - The best length this block found. static __device__ inline void cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) { if (threadIdx.x == 0) { // Save data atomicAdd(&climbs_d,local_climbs); atomicMin(&best_d, best_length); // Release memory delete pos; delete weight; #if DEBUG #if MICRO atomicAdd(&d_lDuration,load_duration[blockIdx.x]); atomicAdd(&d_cDuration,compute_duration[blockIdx.x]); atomicAdd(&d_pDuration,propagate_duration[blockIdx.x]); #endif #if MACRO atomicAdd(&d_uDuration,update_duration[blockIdx.x]); atomicAdd(&d_sDuration,single_duration[blockIdx.x]); #endif #endif } } // // Perform iterative two-opt until there can be no more swaps to reduce the path length. // // @pos_d - The position of each point in the graph. 
// @cities - The number of vertices in the graph template <ThreadBufferStatus Status, int TileSize> static __global__ __launch_bounds__(1024, 2) void TwoOpt(const int Restarts, const Data *pos_d, const int cities) { Data *pos; int *weight; int local_climbs = 0; int best_length = INT_MAX; hiprandState_t rndstate; //hiprand_init(blockIdx.x , 0, 0, &rndstate); __shared__ float x_buffer[TileSize]; __shared__ float y_buffer[TileSize]; __shared__ int w_buffer[TileSize]; // Initialize the memory, if cannot then output error and exit if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) { if(threadIdx.x == 0) { printf("Memory initialization error for block %d\n", blockIdx.x); } return; } #if QUEUE for(int slice = nextSlice(w_buffer) ; slice < Restarts; slice = nextSlice(w_buffer)) { // get smaller blocks for( int r = slice ; r < slice + SliceSize && r < Restarts ; ++r ) { #else for(int r = blockIdx.x; r < Restarts; r += gridDim.x) { // even blocks #endif if( local_climbs % 10 == 0 ) { hiprand_init( blockIdx.x + gridDim.x * local_climbs , 0, 0, &rndstate); } int mini,minj,minchange; // Give our current path we need to permute it permute(pos,weight,cities,rndstate); // Keep applying two-opt until we reach some local // (or global) minimum on the length do { ++local_climbs; minchange = mini = minj = 0; singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer); } while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities, w_buffer)); // Calculate the length of the path x_buffer[0] = 0; __syncthreads(); float term = 0; for (int i = threadIdx.x; i < cities; i += blockDim.x) { term += dist(i, i + 1); } atomicAdd(x_buffer,term); __syncthreads(); // If better then save it to my local best if(threadIdx.x == 0) { if(w_buffer[0] < best_length) { best_length = x_buffer[0]; } } #if QUEUE } #endif } // Release all my resources, and save the best seen // with any other statistics cleanup(pos, weight, local_climbs, best_length); } // // Checks to see if an error occurred with CUDA and if so prints out the message passed and the CUDA // error then quits the application. // // @msg - Message to print out if error occurs static void CudaTest(char *msg) { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", hipGetErrorString(e)); system("PAUSE"); exit(-1); } } #define mallocOnGPU(addr, size) if (hipSuccess != hipMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory"); #define copyToGPU(to, from, size) if (hipSuccess != hipMemcpy(to, from, size, hipMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed"); // // Read TPS lib files into GPU memory. 
ATT and CEIL_2D edge weight types are not supported // // @fname - The name of the file to read the TSP data from // @pos_d - Pointer to the pointer that will hold data on GPU // and is modified here to be the address on the GPU // // @return - Returns the number of cities found static int readInput(int cities, Data **pos_d) { Data* pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);} // C++ Random Note: seed is 1 std::default_random_engine gen; std::uniform_real_distribution<float> ds(0.0,1.0); for(int i = 0 ; i < cities; ++i) { pos[i].x = ds(gen); pos[i].y = ds(gen); //std::cout << pos[i].x << "," << pos[i].y << std::endl; } mallocOnGPU(*pos_d, sizeof(Data) * cities); copyToGPU(*pos_d, pos, sizeof(Data) * cities); delete (pos); return cities; } // // Given an enum value return it's string representation // // @status - The enum value to translate // // @return - The enums string representation in the source code static const std::string getName(const ThreadBufferStatus status) { switch(status) { case MORE_THREADS_THAN_BUFFER: return std::string("MORE_THREADS_THAN_BUFFER"); case EQUAL_SIZE: return std::string("EQUAL_SIZE"); case MORE_BUFFER_THAN_THREADS: return std::string("MORE_BUFFER_THAN_THREADS"); }; return std::string("enum value not found."); } int getMaxSharedMemory( int major ) { if(major < 3) { return 16384; }else if(major < 5) { return 32768; }else { return 65536; } } // // Calculates the maximum number of resident blocks that the card can hold // // @Threads - Number of threads that each block will have // @Shared_Bytes - The amount of bytes each block will allocate // // @return - Returns the number of blocks the card can have resident static int getMaxBlocks(const int Shared_Bytes, const int Threads) { hipDeviceProp_t props; hipGetDeviceProperties(&props,0); std::cout << "Compute Version " << props.major << "." << props.minor << std::endl; /* 5.x or higher */ int numBlocks = 0; int Max_Shared = 65536; int Max_Blocks = 32; const int Block_Thread_Limit = 2048 / Threads; if(props.major < 3) { Max_Shared = 16384; Max_Blocks = 8; }else if(props.major < 5) { Max_Shared = 49152; Max_Blocks = 16; } const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); numBlocks = props.multiProcessorCount * min(Max_Blocks,min(Block_Shared_Limit,Block_Thread_Limit)); #if QUEUE numBlocks = max(MaxBlocks, numBlocks ); #endif return numBlocks; } // // Given an integer returns the next multiple of 32 greater than or equal to it. // // @in - The integer to round to next multiple of 32 // // @return - Returns the next multiple of 32 that is greater than or equals to in static int next32(int in) { return ((in + 31) / 32 ) * 32; } // // Handle ThreadBufferStatus kernel selection // template <int TileSize> static float _wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) { const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize; const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes + 16,Threads)); const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? 
MORE_BUFFER_THAN_THREADS : EQUAL_SIZE; float time; const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks; hipDeviceSetLimit(hipLimitMallocHeapSize, Device_Memory); CudaTest("Change heap size"); // Output runtime configuration std::cout << "Blocks = " << Blocks << ", Threads = " << Threads << ", TileSize = " << TileSize << ", Status = " << getName(Status) << ", Shared Bytes = " << Shared_Bytes << ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl; #if QUEUE std::cout << "SliceSize = " << SliceSize << std::endl; #endif hipEvent_t begin,end; hipEventCreate(&begin); hipEventCreate(&end); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); hipDeviceSetCacheConfig( hipFuncCachePreferShared ); hipProfilerStart(); switch(Status) { case MORE_THREADS_THAN_BUFFER: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; case EQUAL_SIZE: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<EQUAL_SIZE,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; case MORE_BUFFER_THAN_THREADS: hipEventRecord(begin,0); hipLaunchKernelGGL(( TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities); CudaTest("Kernel Call"); hipEventRecord(end,0); hipEventSynchronize(end); break; }; hipProfilerStop(); hipEventElapsedTime(&time,begin,end); hipEventDestroy(begin); hipEventDestroy(end); return time; } // // Choose the parameters // template<int p, int i> class Recur { public: static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) { if( i == TileSize ) { return _wrapStatus<i>( Restarts , Threads , Pos , Cities ); }else { return Recur<p,i-32>::recur( Cities , Pos , Restarts , Threads , TileSize ); } } }; // // Default // template<int p> class Recur<p,0> { public: static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) { hipDeviceProp_t props; hipGetDeviceProperties(&props,0); int sharedMemBytes = getMaxSharedMemory( props.major ) / (2 * (sizeof(int) + 2 * sizeof(float))); if( sharedMemBytes < 1344 && sharedMemBytes >= 1024 ) { return _wrapStatus<1024>(Restarts,Threads,Pos,Cities); } else if( sharedMemBytes < 2048 && sharedMemBytes >= 1344 ) { return _wrapStatus<1344>(Restarts,Threads,Pos,Cities); }else if( sharedMemBytes >= 2048 ) { return _wrapStatus<2048>(Restarts,Threads,Pos,Cities); }else { std::cout << "Invalid TileSize = " << TileSize << std::endl; exit(-1); } return -1; } }; // // Auto-generate templates so I don't have to. // // Runs through each possible value form 0 to 1024 // float RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) { return Recur<1024,1024>::recur( Cities , Pos , Restarts , Threads , TileSize ); } // // Main entry point to program. 
// // // argv[0] - program name // argv[1] - input file // argv[2] - restarts // argv[3] - threads // argv[4] - shared memory // int main(int argc, char *argv[]) { if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);} const int Restarts = atoi(argv[2]); if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);} Data *pos_d; const int Cities = readInput(atoi(argv[1]), &pos_d); // Load data to GPU printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]); // Make sure we are a multiple of 32 and less than 1024 const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities)); // How big is our shared memory const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),2048) : Threads; // Run the kernel const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize); // Synchronize (just in case) hipDeviceSynchronize(); // how long it took int hours = (int)(time / (3600.0f * 1000.0f)); int seconds = (int)(time/1000) % 60; int minutes = (int)((time/1000) / 60) % 60; // Grab the data int climbs,best; hipMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&best,best_d,sizeof(int),0,hipMemcpyDeviceToHost); #if DEBUG #if MICRO long long pd,cd,ld; hipMemcpyFromSymbol(&pd,propagate_duration,sizeof(int),0,hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&cd,compute_duration,sizeof(int),0,hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&ld,load_duration,sizeof(int),0,hipMemcpyDeviceToHost); #else long long sd,ud; hipMemcpyFromSymbol(&sd,single_duration,sizeof(int),0,hipMemcpyDeviceToHost); hipMemcpyFromSymbol(&ud,update_duration,sizeof(int),0,hipMemcpyDeviceToHost); #endif #endif // Output long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2; std::cout << "Number of two-opts " << climbs << std::endl; std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl; std::cout << "best found tour length = " << best << std::endl; std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl; std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl; #if DEBUG #if MICRO std::cout << "Propagate: " << pd << std::endl; std::cout << "Load: " << ld << std::endl; std::cout << "Compute: " << cd << std::endl; #else std::cout << "Single: " << sd << std::endl; std::cout << "Update: " << ud << std::endl; #endif #endif // Reset and free all the data hipDeviceReset(); hipFree(pos_d); return 0; }
a7107149d9da26cc8226ad6766820116b3d99f45.cu
// C++ #include <iostream> #include <string> #include <random> // C #include <stdlib.h> #include <stdio.h> #include <math.h> #include <limits.h> // CUDA #include <cuda.h> #include <curand_kernel.h> #include <cuda_profiler_api.h> // Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC) #pragma warning(push,4) #define DEBUG 0 #define MICRO 0 #define MACRO 0 #define QUEUE 0 #if QUEUE const int MaxBlocks = 10000; const int SliceSize = 100; #endif /******************************************************************************/ /*** 2-opt with random restarts ***********************************************/ /******************************************************************************/ // Euclidean distance #define dist(a, b) (sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) //#define dist(a, b) __float2int_rn(sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y))) #define swap(a, b) {float tmp = a; a = b; b = tmp;} static __device__ int climbs_d = 0; static __device__ int best_d = INT_MAX; #if QUEUE static __device__ int restart_d = 0; #endif // Buffer space, used for cache and maximum propagation #if DEBUG #if MICRO static __device__ unsigned long long int d_lDuration = 0; static __device__ unsigned long long int d_cDuration = 0; static __device__ unsigned long long int d_pDuration = 0; static __device__ long long int load_duration[128] = {0}; static __device__ long long int compute_duration[128] = {0}; static __device__ long long int propagate_duration[128] = {0}; #endif #if MACRO static __device__ unsigned long long int d_uDuration = 0; static __device__ unsigned long long int d_sDuration = 0; static __device__ long long int update_duration[128] = {0}; static __device__ long long int single_duration[128] = {0}; #endif #endif // Instrumentation #define LOG( X ) { if( DEBUG ) {X();} } // Load static __device__ void inline load_start() { #if MICRO if(threadIdx.x == 0) {load_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline load_end() { #if MICRO if(threadIdx.x == 0) {load_duration[blockIdx.x] += clock64();} #endif } // Compute static __device__ void inline compute_start() { #if MICRO if(threadIdx.x == 0) {compute_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline compute_end() { #if MICRO if(threadIdx.x == 0) {compute_duration[blockIdx.x] += clock64();} #endif } // Compute static __device__ void inline propagate_start() { #if MICRO if(threadIdx.x == 0) {propagate_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline propagate_end() { #if sMICRO if(threadIdx.x == 0) {propagate_duration[blockIdx.x] += clock64();} #endif } // Single_iter static __device__ void inline single_start() { #if MACRO if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline single_end() { #if MACRO if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] += clock64();} #endif } // Update static __device__ void inline update_start() { #if MACRO if(threadIdx.x == 0) {update_duration[blockIdx.x] -= clock64();} #endif } static __device__ void inline update_end() { #if MACRO if(threadIdx.x == 0) {update_duration[blockIdx.x] += clock64();} #endif } enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS}; // Data structure used to hold position along path struct __align__(8) Data { float x,y; }; #if QUEUE // // 
Returns a unique integer value with the initial value being 0 // // Synchronizes so not safe for branches // // @return - Returns the next unique int // static __device__ inline int nextSlice(int* __restrict__ w_buffer) { if(threadIdx.x==0) { w_buffer[0] = atomicAdd(&restart_d, SliceSize); }__syncthreads(); return w_buffer[0]; } #endif // Allocates and initializes my global memory and shared memory. // // @pos - An array that need to be initialized and will hold our path points // @weight - An array that need to be initialized and will hold our edge weights // @cities - The amount of points in our graph // // @return - Returns true if initialization was successful, false otherwise. // template <int TileSize> static inline __device__ bool initMemory(const Data* &pos_d, Data* &pos, int * &weight, const int &cities) { // Shared memory is required to share the allocated memory __shared__ Data *d; __shared__ int *w; if(threadIdx.x == 0 ) { d = new Data[cities + 1]; if( d != NULL ) { w = new int[cities]; if( w == NULL ) { delete[] d; d = NULL; } } }__syncthreads(); if(d == NULL) { return false; } // Save new memory locations pos = d; weight = w; for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i]; __syncthreads(); return true; } // // Each thread gives some integer value, then the maximum of them is returned. // // @t_val - The number that the thread submits as a candidate for the maximum value // @cities - The number of cities. // // @return - The maximum value of t_val seen from all threads template <ThreadBufferStatus Status, int TileSize> static inline __device__ int maximum(int t_val, const int &cities, int* __restrict__ &w_buffer) { LOG( propagate_start ); int upper = min(blockDim.x,min(TileSize,cities)); // We have to condense things down if(Status == MORE_THREADS_THAN_BUFFER) { // Compute your index and then try to shove what you have in the buffer const int Index = threadIdx.x % TileSize; w_buffer[Index] = t_val; __syncthreads(); // Now try to win (someone will win) for(int i = 0 ; i <= (blockDim.x /TileSize); ++i ) { if(t_val < w_buffer[Index]) { w_buffer[Index] = t_val; } } }else { // Otherwise we have more than enough room! w_buffer[threadIdx.x] = t_val; }__syncthreads(); #pragma unroll 4 for( int i = 512; i > 32 ; i /= 2 ) { if (TileSize > i && blockDim.x > i) { int offset = (upper + 1) / 2; if( threadIdx.x < offset) { int tmp = w_buffer[threadIdx.x + offset]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } }__syncthreads(); upper = offset; } } // 64 and down if(threadIdx.x < 32) { // Yes. upper = 32. w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16] if(TileSize > 32 && blockDim.x > 32) { int tmp = w_buffer[threadIdx.x + (upper+1)/2]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } for( int i = 16; i > 0; i = i / 2 ) { if(threadIdx.x < i) { int tmp = w_buffer[threadIdx.x + i]; if(tmp < t_val) { w_buffer[threadIdx.x] = t_val = tmp; } } } }__syncthreads(); LOG( propagate_end ); return w_buffer[0]; } // // After we find the best four position to reconnect with all we need to // reverse the path between them. // // @start - The first position in the sub-path we have to swap with the end // @end - The last position in the path we have to swap with the start // @pos - The positions in our path // @weights - The edge weights between points // // TODO: Is it better to reverse the weight or just recompute it? 
// static inline __device__ void reverse(int start, int end, Data* &pos, int* &weight) { while(start<end) { int w = weight[start]; Data d = pos[start]; weight[start] = weight[end-1]; pos[start] = pos[end]; weight[end-1] = w; pos[end] = d; start += blockDim.x; end -= blockDim.x; }__syncthreads(); } // // Perform a single iteration of Two-Opt // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <int TileSize> static __device__ void singleIter(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, int* __restrict__ w_buffer) { LOG( single_start ); // // The tour is divided into segments. Each segment has a length of // the number of threads, except possibly the last one. // // We traverse through the segments. When we are in a segment each // city in the segment of the tour is given to a thread. Then we // begin scanning each city from the end of the tour until we reach // the current city. Later threads will terminate this process earlier // than earlier threads. // // During each scan we will evaluate if it is better to reverse the path // between the two cities. If so we check to see if that is better than // any other possible reversal we have seen. // // After we have done this for all segments then we call update. Update // make some modification to the tour given the set of best reversals // seen by each thread. // // for (int leading = 0; leading < cities - 2; leading += blockDim.x) { int i = leading + threadIdx.x; float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1; if (i < cities - 2) { minchange -= weight[i]; pxi0 = pos[i].x; pyi0 = pos[i].y; pxi1 = pos[i+1].x; pyi1 = pos[i+1].y; pxj1 = pos[0].x; pyj1 = pos[0].y; } for (int trailing = cities - 1; trailing >= leading + 2; trailing -= TileSize) { int bound = trailing - TileSize + 1; // The lower bound on what we can load // // Load the shared memory cache // // Each thread will try to load adjacent elements // LOG( load_start ); for(int k = threadIdx.x; k < TileSize; k += blockDim.x) { int cache_idx = k + bound; if (cache_idx >= (leading + 2)) { // Never go below the lowest city x_buffer[k] = pos[cache_idx].x; y_buffer[k] = pos[cache_idx].y; w_buffer[k] = weight[cache_idx]; } }__syncthreads(); LOG( load_end ); LOG( compute_start ); // Compute the lower bound that we can see int lower = bound; if (lower < i + 2) lower = i + 2; // Go over loaded cache that everyone will use for (int current = trailing; current >= lower; current--) { int cache_idx = current - bound; float pxj0 = x_buffer[cache_idx]; float pyj0 = y_buffer[cache_idx]; int change = w_buffer[cache_idx] + __float2int_rn(sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0))) + __float2int_rn(sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1))); // Shift down pxj1 = pxj0; pyj1 = pyj0; // If better save it and where we found it if (minchange > change) { minchange = change; mini = i; minj = current; } }__syncthreads(); LOG( compute_end ); } if (i < cities - 2) { minchange += weight[i]; } } LOG( single_end ); } // // Perform the swaps to the edges i and j to decrease the total length of our // path and update the weight and pos arrays appropriately. 
// // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @minchange - The current best change we can make // @mini - The ith city in the path that is part of the swap // @minj - The jth city in the path that is part of the swap // @cities - The number of cities along the path (excluding the end point) template <ThreadBufferStatus Status, int TileSize> static __device__ bool update(Data* &pos, int* &weight, int &minchange, int &mini, int &minj, const int &cities, int* __restrict__ w_buffer) { LOG( update_start ); // For all threads, find the best change maximum<Status,TileSize>(minchange, cities, w_buffer); // If we don't have one, oh well. if(w_buffer[0] >= 0) { LOG( update_end ); return false; } // While we have an update while(w_buffer[0] < 0 ) { // If we have multiple bests, pick one if (minchange == w_buffer[0]) { w_buffer[1] = threadIdx.x; }__syncthreads(); // Get what which indices to swap if(threadIdx.x==w_buffer[1]) { w_buffer[2] = mini; w_buffer[3] = minj; }__syncthreads(); // Give them to each thread int mi = w_buffer[2]; int mj = w_buffer[3]; // If we are overlapping the best then we can't be used if(!(minj < (mi - 1)) && !(mini > (mj + 1))) { minchange = 0; } // Reverse the path between the nodes selected reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight); // Fix connecting edges weights for the endpoints weight[mi] = -dist(mi,mi+1); weight[mj] = -dist(mj,mj+1); __syncthreads(); // Wait for everyone // Get the next best maximum<Status,TileSize>(minchange, cities, w_buffer); } LOG( update_end ); return true; } // // Given a path we randomly permute it into a new new path and then initialize the weights of the path. // // @pos - The current Hamiltonian path // @weight - The current weight of our edges along the path // @cities - The number of cities along the path (excluding the end point) static __device__ inline void permute(Data* &pos, int* &weight, const int &cities, curandState &rndstate) { if (threadIdx.x == 0) { // serial permutation for (int i = 1; i < cities; i++) { int j = curand(&rndstate) % (cities - 1) + 1; Data d = pos[i]; pos[i] = pos[j]; pos[j] = d; } pos[cities] = pos[0]; }__syncthreads(); for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1); __syncthreads(); } // // Releases memory and saves results // // @pos - Pointer to allocated path memory // @weight - Pointer to allocated edge weight memory // @local_climbs - The number of climbs performed by this block // @best_length - The best length this block found. static __device__ inline void cleanup(Data* &pos, int* &weight, int &local_climbs, int &best_length) { if (threadIdx.x == 0) { // Save data atomicAdd(&climbs_d,local_climbs); atomicMin(&best_d, best_length); // Release memory delete pos; delete weight; #if DEBUG #if MICRO atomicAdd(&d_lDuration,load_duration[blockIdx.x]); atomicAdd(&d_cDuration,compute_duration[blockIdx.x]); atomicAdd(&d_pDuration,propagate_duration[blockIdx.x]); #endif #if MACRO atomicAdd(&d_uDuration,update_duration[blockIdx.x]); atomicAdd(&d_sDuration,single_duration[blockIdx.x]); #endif #endif } } // // Perform iterative two-opt until there can be no more swaps to reduce the path length. // // @pos_d - The position of each point in the graph. 
// @cities - The number of vertices in the graph template <ThreadBufferStatus Status, int TileSize> static __global__ __launch_bounds__(1024, 2) void TwoOpt(const int Restarts, const Data *pos_d, const int cities) { Data *pos; int *weight; int local_climbs = 0; int best_length = INT_MAX; curandState rndstate; //curand_init(blockIdx.x , 0, 0, &rndstate); __shared__ float x_buffer[TileSize]; __shared__ float y_buffer[TileSize]; __shared__ int w_buffer[TileSize]; // Initialize the memory, if cannot then output error and exit if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) { if(threadIdx.x == 0) { printf("Memory initialization error for block %d\n", blockIdx.x); } return; } #if QUEUE for(int slice = nextSlice(w_buffer) ; slice < Restarts; slice = nextSlice(w_buffer)) { // get smaller blocks for( int r = slice ; r < slice + SliceSize && r < Restarts ; ++r ) { #else for(int r = blockIdx.x; r < Restarts; r += gridDim.x) { // even blocks #endif if( local_climbs % 10 == 0 ) { curand_init( blockIdx.x + gridDim.x * local_climbs , 0, 0, &rndstate); } int mini,minj,minchange; // Give our current path we need to permute it permute(pos,weight,cities,rndstate); // Keep applying two-opt until we reach some local // (or global) minimum on the length do { ++local_climbs; minchange = mini = minj = 0; singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer); } while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities, w_buffer)); // Calculate the length of the path x_buffer[0] = 0; __syncthreads(); float term = 0; for (int i = threadIdx.x; i < cities; i += blockDim.x) { term += dist(i, i + 1); } atomicAdd(x_buffer,term); __syncthreads(); // If better then save it to my local best if(threadIdx.x == 0) { if(w_buffer[0] < best_length) { best_length = x_buffer[0]; } } #if QUEUE } #endif } // Release all my resources, and save the best seen // with any other statistics cleanup(pos, weight, local_climbs, best_length); } // // Checks to see if an error occurred with CUDA and if so prints out the message passed and the CUDA // error then quits the application. // // @msg - Message to print out if error occurs static void CudaTest(char *msg) { cudaError_t e; cudaThreadSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); system("PAUSE"); exit(-1); } } #define mallocOnGPU(addr, size) if (cudaSuccess != cudaMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory"); #define copyToGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed"); // // Read TPS lib files into GPU memory. 
ATT and CEIL_2D edge weight types are not supported // // @fname - The name of the file to read the TSP data from // @pos_d - Pointer to the pointer that will hold data on GPU // and is modified here to be the address on the GPU // // @return - Returns the number of cities found static int readInput(int cities, Data **pos_d) { Data* pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);} // C++ Random Note: seed is 1 std::default_random_engine gen; std::uniform_real_distribution<float> ds(0.0,1.0); for(int i = 0 ; i < cities; ++i) { pos[i].x = ds(gen); pos[i].y = ds(gen); //std::cout << pos[i].x << "," << pos[i].y << std::endl; } mallocOnGPU(*pos_d, sizeof(Data) * cities); copyToGPU(*pos_d, pos, sizeof(Data) * cities); delete (pos); return cities; } // // Given an enum value return it's string representation // // @status - The enum value to translate // // @return - The enums string representation in the source code static const std::string getName(const ThreadBufferStatus status) { switch(status) { case MORE_THREADS_THAN_BUFFER: return std::string("MORE_THREADS_THAN_BUFFER"); case EQUAL_SIZE: return std::string("EQUAL_SIZE"); case MORE_BUFFER_THAN_THREADS: return std::string("MORE_BUFFER_THAN_THREADS"); }; return std::string("enum value not found."); } int getMaxSharedMemory( int major ) { if(major < 3) { return 16384; }else if(major < 5) { return 32768; }else { return 65536; } } // // Calculates the maximum number of resident blocks that the card can hold // // @Threads - Number of threads that each block will have // @Shared_Bytes - The amount of bytes each block will allocate // // @return - Returns the number of blocks the card can have resident static int getMaxBlocks(const int Shared_Bytes, const int Threads) { cudaDeviceProp props; cudaGetDeviceProperties(&props,0); std::cout << "Compute Version " << props.major << "." << props.minor << std::endl; /* 5.x or higher */ int numBlocks = 0; int Max_Shared = 65536; int Max_Blocks = 32; const int Block_Thread_Limit = 2048 / Threads; if(props.major < 3) { Max_Shared = 16384; Max_Blocks = 8; }else if(props.major < 5) { Max_Shared = 49152; Max_Blocks = 16; } const int Block_Shared_Limit = (Max_Shared / Shared_Bytes); numBlocks = props.multiProcessorCount * min(Max_Blocks,min(Block_Shared_Limit,Block_Thread_Limit)); #if QUEUE numBlocks = max(MaxBlocks, numBlocks ); #endif return numBlocks; } // // Given an integer returns the next multiple of 32 greater than or equal to it. // // @in - The integer to round to next multiple of 32 // // @return - Returns the next multiple of 32 that is greater than or equals to in static int next32(int in) { return ((in + 31) / 32 ) * 32; } // // Handle ThreadBufferStatus kernel selection // template <int TileSize> static float _wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) { const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize; const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes + 16,Threads)); const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? 
MORE_BUFFER_THAN_THREADS : EQUAL_SIZE; float time; const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks; cudaDeviceSetLimit(cudaLimitMallocHeapSize, Device_Memory); CudaTest("Change heap size"); // Output runtime configuration std::cout << "Blocks = " << Blocks << ", Threads = " << Threads << ", TileSize = " << TileSize << ", Status = " << getName(Status) << ", Shared Bytes = " << Shared_Bytes << ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl; #if QUEUE std::cout << "SliceSize = " << SliceSize << std::endl; #endif cudaEvent_t begin,end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); cudaThreadSetCacheConfig( cudaFuncCachePreferShared ); cudaProfilerStart(); switch(Status) { case MORE_THREADS_THAN_BUFFER: cudaEventRecord(begin,0); TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; case EQUAL_SIZE: cudaEventRecord(begin,0); TwoOpt<EQUAL_SIZE,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; case MORE_BUFFER_THAN_THREADS: cudaEventRecord(begin,0); TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities); CudaTest("Kernel Call"); cudaEventRecord(end,0); cudaEventSynchronize(end); break; }; cudaProfilerStop(); cudaEventElapsedTime(&time,begin,end); cudaEventDestroy(begin); cudaEventDestroy(end); return time; } // // Choose the parameters // template<int p, int i> class Recur { public: static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) { if( i == TileSize ) { return _wrapStatus<i>( Restarts , Threads , Pos , Cities ); }else { return Recur<p,i-32>::recur( Cities , Pos , Restarts , Threads , TileSize ); } } }; // // Default // template<int p> class Recur<p,0> { public: static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) { cudaDeviceProp props; cudaGetDeviceProperties(&props,0); int sharedMemBytes = getMaxSharedMemory( props.major ) / (2 * (sizeof(int) + 2 * sizeof(float))); if( sharedMemBytes < 1344 && sharedMemBytes >= 1024 ) { return _wrapStatus<1024>(Restarts,Threads,Pos,Cities); } else if( sharedMemBytes < 2048 && sharedMemBytes >= 1344 ) { return _wrapStatus<1344>(Restarts,Threads,Pos,Cities); }else if( sharedMemBytes >= 2048 ) { return _wrapStatus<2048>(Restarts,Threads,Pos,Cities); }else { std::cout << "Invalid TileSize = " << TileSize << std::endl; exit(-1); } return -1; } }; // // Auto-generate templates so I don't have to. // // Runs through each possible value form 0 to 1024 // float RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) { return Recur<1024,1024>::recur( Cities , Pos , Restarts , Threads , TileSize ); } // // Main entry point to program. 
// // // argv[0] - program name // argv[1] - input file // argv[2] - restarts // argv[3] - threads // argv[4] - shared memory // int main(int argc, char *argv[]) { if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);} const int Restarts = atoi(argv[2]); if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);} Data *pos_d; const int Cities = readInput(atoi(argv[1]), &pos_d); // Load data to GPU printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]); // Make sure we are a multiple of 32 and less than 1024 const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities)); // How big is our shared memory const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),2048) : Threads; // Run the kernel const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize); // Synchronize (just in case) cudaDeviceSynchronize(); // how long it took int hours = (int)(time / (3600.0f * 1000.0f)); int seconds = (int)(time/1000) % 60; int minutes = (int)((time/1000) / 60) % 60; // Grab the data int climbs,best; cudaMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&best,best_d,sizeof(int),0,cudaMemcpyDeviceToHost); #if DEBUG #if MICRO long long pd,cd,ld; cudaMemcpyFromSymbol(&pd,propagate_duration,sizeof(int),0,cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&cd,compute_duration,sizeof(int),0,cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&ld,load_duration,sizeof(int),0,cudaMemcpyDeviceToHost); #else long long sd,ud; cudaMemcpyFromSymbol(&sd,single_duration,sizeof(int),0,cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(&ud,update_duration,sizeof(int),0,cudaMemcpyDeviceToHost); #endif #endif // Output long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2; std::cout << "Number of two-opts " << climbs << std::endl; std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl; std::cout << "best found tour length = " << best << std::endl; std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl; std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl; #if DEBUG #if MICRO std::cout << "Propagate: " << pd << std::endl; std::cout << "Load: " << ld << std::endl; std::cout << "Compute: " << cd << std::endl; #else std::cout << "Single: " << sd << std::endl; std::cout << "Update: " << ud << std::endl; #endif #endif // Reset and free all the data cudaDeviceReset(); cudaFree(pos_d); return 0; }
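Despite its name, the maximum() helper shared by the .hip and .cu versions above computes a block-wide minimum: every thread contributes its best (most negative) minchange to the shared w_buffer, a tree reduction condenses the values, and all threads then read the winning value from w_buffer[0]. Below is a minimal, self-contained CUDA sketch of that block-wide minimum reduction written under simplifying assumptions (a power-of-two block size and a shared buffer at least as large as the block, i.e. no MORE_THREADS_THAN_BUFFER handling); blockMin and demo are illustrative names, not taken from the file above.

#include <cstdio>
#include <cuda_runtime.h>

// blockMin: tree reduction in shared memory.
// Assumes BLOCK_SIZE is a power of two and that every thread of the block calls it.
template <int BLOCK_SIZE>
__device__ int blockMin(int t_val, int *s_buf)
{
  s_buf[threadIdx.x] = t_val;
  __syncthreads();

  // Halve the number of active threads each step; keep the smaller value.
  for (int stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int other = s_buf[threadIdx.x + stride];
      if (other < s_buf[threadIdx.x]) s_buf[threadIdx.x] = other;
    }
    __syncthreads();
  }
  return s_buf[0];   // every thread sees the block-wide minimum
}

__global__ void demo(const int *in, int *out)
{
  __shared__ int s_buf[256];
  int v = in[blockIdx.x * blockDim.x + threadIdx.x];
  int m = blockMin<256>(v, s_buf);
  if (threadIdx.x == 0) out[blockIdx.x] = m;
}

int main()
{
  const int n = 256;
  int h_in[n], h_out;
  for (int i = 0; i < n; ++i) h_in[i] = 1000 - i;   // minimum is 1000 - 255 = 745

  int *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_out, sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

  demo<<<1, n>>>(d_in, d_out);

  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("block minimum = %d\n", h_out);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

The original routine generalizes this pattern: when the block has more threads than the TileSize-element buffer, each thread first folds its candidate into slot threadIdx.x % TileSize before the tree reduction runs.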
f525626a1812b6dd1b45320ec3376bf57f682424.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

// Initialize host vectors
void init(int *a, int *b, int n)
{
  for (int i = 0; i < n; ++i) {
    a[i] = i;
    b[i] = n - i;
  }
}

// Check result correctness: every element of c should equal n
void check(int *c, int n)
{
  int i = 0;
  while (i < n && c[i] == n) {
    ++i;
  }
  if (i == n)
    printf("Ok\n");
  else
    printf("Non ok\n");
}

// Cuda kernel
__global__ void add(int *a, int *b, int *c, int n)
{
  //@TODO@ : complete kernel code
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i < n) {
    c[i] = a[i] + b[i];
  }
}

int main(int argc, char **argv)
{
  if (argc < 2) { printf("Give the vector size as first parameter\n"); exit(2); }

  int n = atoi(argv[1]);
  printf("Vector size is %d\n", n);

  // host pointers
  int *host_a, *host_b, *host_c;

  const int STREAM_NB = 4;
  const int STREAM_SIZE = 512;

  // Allocations on host (pinned, so the asynchronous copies below can overlap)
  //@TODO@ :
  hipHostMalloc((void **) &host_a, n * sizeof(int), hipHostMallocDefault);
  hipHostMalloc((void **) &host_b, n * sizeof(int), hipHostMallocDefault);
  hipHostMalloc((void **) &host_c, n * sizeof(int), hipHostMallocDefault);

  // Initialize vectors
  init(host_a, host_b, n);

  hipStream_t streams[STREAM_NB];
  int *d_A[STREAM_NB];
  int *d_B[STREAM_NB];
  int *d_C[STREAM_NB];

  for (int i = 0; i < STREAM_NB; i++) {
    hipStreamCreate(&streams[i]);
    hipMalloc((void **) &d_A[i], STREAM_SIZE * sizeof(int));
    hipMalloc((void **) &d_B[i], STREAM_SIZE * sizeof(int));
    hipMalloc((void **) &d_C[i], STREAM_SIZE * sizeof(int));
  }

  // Process the vector in chunks of STREAM_SIZE elements, one chunk per stream.
  // Note: assumes n is a multiple of STREAM_SIZE * STREAM_NB.
  for (int i = 0; i < n; i += STREAM_SIZE * STREAM_NB) {
    for (int j = 0; j < STREAM_NB; j++) {
      hipMemcpyAsync(d_A[j], host_a + i + STREAM_SIZE * j, STREAM_SIZE * sizeof(int), hipMemcpyHostToDevice, streams[j]);
      hipMemcpyAsync(d_B[j], host_b + i + STREAM_SIZE * j, STREAM_SIZE * sizeof(int), hipMemcpyHostToDevice, streams[j]);
      hipLaunchKernelGGL(add, dim3(STREAM_SIZE / 256), dim3(256), 0, streams[j], d_A[j], d_B[j], d_C[j], STREAM_SIZE);
      hipMemcpyAsync(host_c + i + STREAM_SIZE * j, d_C[j], STREAM_SIZE * sizeof(int), hipMemcpyDeviceToHost, streams[j]);
    }
  }
  hipDeviceSynchronize();

  // Check result
  check(host_c, n);

  // Free device memory and streams in a loop :
  for (int i = 0; i < STREAM_NB; i++) {
    hipStreamDestroy(streams[i]);
    hipFree(d_A[i]);
    hipFree(d_B[i]);
    hipFree(d_C[i]);
  }

  // Pinned host memory must be released with hipHostFree
  hipHostFree(host_a);
  hipHostFree(host_b);
  hipHostFree(host_c);

  return 0;
}
f525626a1812b6dd1b45320ec3376bf57f682424.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Initialize host vectors
void init(int *a, int *b, int n)
{
  for (int i = 0; i < n; ++i) {
    a[i] = i;
    b[i] = n - i;
  }
}

// Check result correctness: every element of c should equal n
void check(int *c, int n)
{
  int i = 0;
  while (i < n && c[i] == n) {
    ++i;
  }
  if (i == n)
    printf("Ok\n");
  else
    printf("Non ok\n");
}

// Cuda kernel
__global__ void add(int *a, int *b, int *c, int n)
{
  //@TODO@ : complete kernel code
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i < n) {
    c[i] = a[i] + b[i];
  }
}

int main(int argc, char **argv)
{
  if (argc < 2) { printf("Give the vector size as first parameter\n"); exit(2); }

  int n = atoi(argv[1]);
  printf("Vector size is %d\n", n);

  // host pointers
  int *host_a, *host_b, *host_c;

  const int STREAM_NB = 4;
  const int STREAM_SIZE = 512;

  // Allocations on host (pinned, so the asynchronous copies below can overlap)
  //@TODO@ :
  cudaHostAlloc((void **) &host_a, n * sizeof(int), cudaHostAllocDefault);
  cudaHostAlloc((void **) &host_b, n * sizeof(int), cudaHostAllocDefault);
  cudaHostAlloc((void **) &host_c, n * sizeof(int), cudaHostAllocDefault);

  // Initialize vectors
  init(host_a, host_b, n);

  cudaStream_t streams[STREAM_NB];
  int *d_A[STREAM_NB];
  int *d_B[STREAM_NB];
  int *d_C[STREAM_NB];

  for (int i = 0; i < STREAM_NB; i++) {
    cudaStreamCreate(&streams[i]);
    cudaMalloc((void **) &d_A[i], STREAM_SIZE * sizeof(int));
    cudaMalloc((void **) &d_B[i], STREAM_SIZE * sizeof(int));
    cudaMalloc((void **) &d_C[i], STREAM_SIZE * sizeof(int));
  }

  // Process the vector in chunks of STREAM_SIZE elements, one chunk per stream.
  // Note: assumes n is a multiple of STREAM_SIZE * STREAM_NB.
  for (int i = 0; i < n; i += STREAM_SIZE * STREAM_NB) {
    for (int j = 0; j < STREAM_NB; j++) {
      cudaMemcpyAsync(d_A[j], host_a + i + STREAM_SIZE * j, STREAM_SIZE * sizeof(int), cudaMemcpyHostToDevice, streams[j]);
      cudaMemcpyAsync(d_B[j], host_b + i + STREAM_SIZE * j, STREAM_SIZE * sizeof(int), cudaMemcpyHostToDevice, streams[j]);
      add<<<STREAM_SIZE / 256, 256, 0, streams[j]>>>(d_A[j], d_B[j], d_C[j], STREAM_SIZE);
      cudaMemcpyAsync(host_c + i + STREAM_SIZE * j, d_C[j], STREAM_SIZE * sizeof(int), cudaMemcpyDeviceToHost, streams[j]);
    }
  }
  cudaDeviceSynchronize();

  // Check result
  check(host_c, n);

  // Free device memory and streams in a loop :
  for (int i = 0; i < STREAM_NB; i++) {
    cudaStreamDestroy(streams[i]);
    cudaFree(d_A[i]);
    cudaFree(d_B[i]);
    cudaFree(d_C[i]);
  }

  // Pinned host memory must be released with cudaFreeHost
  cudaFreeHost(host_a);
  cudaFreeHost(host_b);
  cudaFreeHost(host_c);

  return 0;
}
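The pair above also illustrates what hipify changes and what it leaves alone: headers and identifiers are renamed one-for-one (cuda.h becomes hip/hip_runtime.h, cudaHostAlloc becomes hipHostMalloc, cudaStreamCreate becomes hipStreamCreate, cudaMemcpyAsync becomes hipMemcpyAsync, cudaFreeHost becomes hipHostFree), while the only structural rewrite is the kernel launch, where the CUDA triple-chevron syntax becomes a hipLaunchKernelGGL call taking the grid, block, shared-memory size and stream followed by the kernel arguments. The short CUDA program below illustrates that launch form; scale is an illustrative kernel name, not taken from the files, and the HIP equivalent is shown in a comment in the shape hipify produces for the pair above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *x, float s, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

int main()
{
  const int n = 1024;
  float h_x[n];
  for (int i = 0; i < n; ++i) h_x[i] = 1.0f;

  float *d_x;
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemcpyAsync(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice, stream);

  // CUDA launch syntax; hipify rewrites this line as
  //   hipLaunchKernelGGL(scale, dim3(n / 256), dim3(256), 0, stream, d_x, 2.0f, n);
  scale<<<n / 256, 256, 0, stream>>>(d_x, 2.0f, n);

  cudaMemcpyAsync(h_x, d_x, n * sizeof(float), cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);
  printf("h_x[0] = %f\n", h_x[0]);   // expect 2.0

  cudaFree(d_x);
  cudaStreamDestroy(stream);
  return 0;
}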
bd6832b97d2391c677d5c9d12f5814cccf734d42.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <experimental/detail/graph_utils.cuh> #include <experimental/graph.hpp> #include <partition_manager.hpp> #include <utilities/comm_utils.cuh> #include <utilities/error.hpp> #include <rmm/thrust_rmm_allocator.h> #include <raft/device_atomics.cuh> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/adjacent_difference.h> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include <algorithm> #include <tuple> namespace cugraph { namespace experimental { namespace { // can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an // extended __device__ lambda must allow its address to be taken) template <typename vertex_t> struct out_of_range_t { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; __device__ bool operator()(thrust::tuple<vertex_t, vertex_t> t) { auto major = thrust::get<0>(t); auto minor = thrust::get<1>(t); return (major < major_first) || (major >= major_last) || (minor < minor_first) || (minor >= minor_last); } }; template <bool store_transposed, typename vertex_t, typename edge_t, typename weight_t> std:: tuple<rmm::device_uvector<edge_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>> edge_list_to_compressed_sparse(raft::handle_t const &handle, edgelist_t<vertex_t, edge_t, weight_t> const &edgelist, vertex_t major_first, vertex_t major_last, vertex_t minor_first, vertex_t minor_last) { rmm::device_uvector<edge_t> offsets((major_last - major_first) + 1, handle.get_stream()); rmm::device_uvector<vertex_t> indices(edgelist.number_of_edges, handle.get_stream()); rmm::device_uvector<weight_t> weights( edgelist.p_edge_weights != nullptr ? edgelist.number_of_edges : 0, handle.get_stream()); thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), offsets.begin(), offsets.end(), edge_t{0}); thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), indices.begin(), indices.end(), vertex_t{0}); // FIXME: need to performance test this code with R-mat graphs having highly-skewed degree // distribution. If there is a small number of vertices with very large degrees, atomicAdd can // sequentialize execution. CUDA9+ & Kepler+ provide complier/architectural optimizations to // mitigate this impact // (https://developer.nvidia.com/blog/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/), // and we need to check this thrust::for_each based approach delivers the expected performance. // FIXME: also need to verify this approach is at least not significantly slower than the sorting // based approach (this approach does not use extra memory, so better stick to this approach // unless performance is significantly worse). 
auto p_offsets = offsets.data(); auto p_indices = indices.data(); auto p_weights = edgelist.p_edge_weights != nullptr ? weights.data() : static_cast<weight_t *>(nullptr); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices, store_transposed ? edgelist.p_dst_vertices + edgelist.number_of_edges : edgelist.p_src_vertices + edgelist.number_of_edges, [p_offsets, major_first] __device__(auto v) { atomicAdd(p_offsets + (v - major_first), edge_t{1}); }); thrust::exclusive_scan(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), offsets.begin(), offsets.end(), offsets.begin()); if (edgelist.p_edge_weights != nullptr) { auto edge_first = thrust::make_zip_iterator(thrust::make_tuple( edgelist.p_src_vertices, edgelist.p_dst_vertices, edgelist.p_edge_weights)); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), edge_first, edge_first + edgelist.number_of_edges, [p_offsets, p_indices, p_weights, major_first] __device__(auto e) { auto s = thrust::get<0>(e); auto d = thrust::get<1>(e); auto w = thrust::get<2>(e); auto major = store_transposed ? d : s; auto minor = store_transposed ? s : d; auto start = p_offsets[major - major_first]; auto degree = p_offsets[(major - major_first) + 1] - start; auto idx = atomicAdd(p_indices + (start + degree - 1), vertex_t{1}); // use the last element as a counter // FIXME: we can actually store minor - minor_first instead of minor to save // memory if minor can be larger than 32 bit but minor - minor_first fits // within 32 bit p_indices[start + idx] = minor; // overwrite the counter only if idx == degree - 1 (no race) p_weights[start + idx] = w; }); } else { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist.p_src_vertices, edgelist.p_dst_vertices)); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), edge_first, edge_first + edgelist.number_of_edges, [p_offsets, p_indices, p_weights, major_first] __device__(auto e) { auto s = thrust::get<0>(e); auto d = thrust::get<1>(e); auto major = store_transposed ? d : s; auto minor = store_transposed ? 
s : d; auto start = p_offsets[major - major_first]; auto degree = p_offsets[(major - major_first) + 1] - start; auto idx = atomicAdd(p_indices + (start + degree - 1), vertex_t{1}); // use the last element as a counter // FIXME: we can actually store minor - minor_first instead of minor to save // memory if minor can be larger than 32 bit but minor - minor_first fits // within 32 bit p_indices[start + idx] = minor; // overwrite the counter only if idx == degree - 1 (no race) }); } // FIXME: need to add an option to sort neighbor lists return std::make_tuple(std::move(offsets), std::move(indices), std::move(weights)); } template <typename vertex_t, typename DegreeIterator, typename ThresholdIterator> std::vector<vertex_t> segment_degree_sorted_vertex_partition(raft::handle_t const &handle, DegreeIterator degree_first, DegreeIterator degree_last, ThresholdIterator threshold_first, ThresholdIterator threshold_last) { auto num_elements = thrust::distance(degree_first, degree_last); auto num_segments = thrust::distance(threshold_first, threshold_last) + 1; std::vector<vertex_t> h_segment_offsets(num_segments + 1); h_segment_offsets[0] = 0; h_segment_offsets.back() = num_elements; rmm::device_uvector<vertex_t> d_segment_offsets(num_segments - 1, handle.get_stream()); thrust::upper_bound(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), degree_first, degree_last, threshold_first, threshold_last, d_segment_offsets.begin()); raft::update_host(h_segment_offsets.begin() + 1, d_segment_offsets.begin(), d_segment_offsets.size(), handle.get_stream()); CUDA_TRY(hipStreamSynchronize( handle.get_stream())); // this is necessary as d_segment_offsets will become out-of-scope once // this function returns and this function returns a host variable which // can be used right after return. 
return h_segment_offsets; } } // namespace template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed, bool multi_gpu> graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>:: graph_t(raft::handle_t const &handle, std::vector<edgelist_t<vertex_t, edge_t, weight_t>> const &edgelists, partition_t<vertex_t> const &partition, vertex_t number_of_vertices, edge_t number_of_edges, graph_properties_t properties, bool sorted_by_global_degree_within_vertex_partition, bool do_expensive_check) : detail::graph_base_t<vertex_t, edge_t, weight_t>( handle, number_of_vertices, number_of_edges, properties), partition_(partition) { // cheap error checks auto &comm = this->get_handle_ptr()->get_comms(); auto const comm_size = comm.get_size(); auto &row_comm = this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().row_name()); auto const row_comm_rank = row_comm.get_rank(); auto const row_comm_size = row_comm.get_size(); auto &col_comm = this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); auto const col_comm_rank = col_comm.get_rank(); auto const col_comm_size = col_comm.get_size(); auto default_stream = this->get_handle_ptr()->get_stream(); CUGRAPH_EXPECTS(edgelists.size() > 0, "Invalid API parameter: edgelists.size() should be non-zero."); bool is_weighted = edgelists[0].p_edge_weights != nullptr; CUGRAPH_EXPECTS( std::any_of(edgelists.begin() + 1, edgelists.end(), [is_weighted](auto edgelist) { return (edgelist.p_src_vertices == nullptr) || (edgelist.p_dst_vertices == nullptr) || (is_weighted && (edgelist.p_edge_weights == nullptr)) || (!is_weighted && (edgelist.p_edge_weights != nullptr)); }) == false, "Invalid API parameter: edgelists[].p_src_vertices and edgelists[].p_dst_vertices should not " "be nullptr and edgelists[].p_edge_weights should be nullptr (if edgelists[0].p_edge_weights " "is nullptr) or should not be nullptr (otherwise)."); CUGRAPH_EXPECTS((partition.is_hypergraph_partitioned() && (edgelists.size() == static_cast<size_t>(col_comm_size))) || (!(partition.is_hypergraph_partitioned()) && (edgelists.size() == 1)), "Invalid API parameter: errneous edgelists.size()."); // optional expensive checks (part 1/3) if (do_expensive_check) { edge_t number_of_local_edges_sum{}; for (size_t i = 0; i < edgelists.size(); ++i) { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i); std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range(); number_of_local_edges_sum += edgelists[i].number_of_edges; auto edge_first = thrust::make_zip_iterator(thrust::make_tuple( store_transposed ? edgelists[i].p_dst_vertices : edgelists[i].p_src_vertices, store_transposed ? 
edgelists[i].p_src_vertices : edgelists[i].p_dst_vertices)); // better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(default_stream)->on(default_stream), edge_first, edge_first + edgelists[i].number_of_edges, out_of_range_t<vertex_t>{ major_first, major_last, minor_first, minor_last}) == 0, "Invalid API parameter: edgelists[] have out-of-range values."); } number_of_local_edges_sum = host_scalar_allreduce(comm, number_of_local_edges_sum, default_stream); CUGRAPH_EXPECTS(number_of_local_edges_sum == this->get_number_of_edges(), "Invalid API parameter: the sum of local edges doe counts not match with " "number_of_local_edges."); CUGRAPH_EXPECTS( partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices, "Invalid API parameter: vertex partition should cover [0, number_of_vertices)."); } // convert edge list (COO) to compressed sparse format (CSR or CSC) adj_matrix_partition_offsets_.reserve(edgelists.size()); adj_matrix_partition_indices_.reserve(edgelists.size()); adj_matrix_partition_weights_.reserve(is_weighted ? edgelists.size() : 0); for (size_t i = 0; i < edgelists.size(); ++i) { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i); std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range(); rmm::device_uvector<edge_t> offsets(0, default_stream); rmm::device_uvector<vertex_t> indices(0, default_stream); rmm::device_uvector<weight_t> weights(0, default_stream); std::tie(offsets, indices, weights) = edge_list_to_compressed_sparse<store_transposed>( *(this->get_handle_ptr()), edgelists[i], major_first, major_last, minor_first, minor_last); adj_matrix_partition_offsets_.push_back(std::move(offsets)); adj_matrix_partition_indices_.push_back(std::move(indices)); if (is_weighted) { adj_matrix_partition_weights_.push_back(std::move(weights)); } } // update degree-based segment offsets (to be used for graph analytics kernel optimization) if (sorted_by_global_degree_within_vertex_partition) { auto degrees = detail::compute_major_degree( *(this->get_handle_ptr()), adj_matrix_partition_offsets_, partition_); // optional expensive checks (part 2/3) if (do_expensive_check) { CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream), degrees.begin(), degrees.end(), thrust::greater<edge_t>{}), "Invalid API parameter: sorted_by_global_degree_within_vertex_partition is " "set to true, but degrees are not non-ascending."); } static_assert(detail::num_segments_per_vertex_partition == 3); static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) && (detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max())); rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1, default_stream); std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::low_degree_threshold), static_cast<edge_t>(detail::mid_degree_threshold)}; raft::update_device( d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream); rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1, default_stream); segment_offsets.set_element_async(0, 0, default_stream); segment_offsets.set_element_async( detail::num_segments_per_vertex_partition, degrees.size(), default_stream); thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream), degrees.begin(), 
degrees.end(), d_thresholds.begin(), d_thresholds.end(), segment_offsets.begin() + 1); rmm::device_uvector<vertex_t> aggregate_segment_offsets(0, default_stream); if (partition.is_hypergraph_partitioned()) { rmm::device_uvector<vertex_t> aggregate_segment_offsets( col_comm_size * segment_offsets.size(), default_stream); col_comm.allgather(segment_offsets.data(), aggregate_segment_offsets.data(), segment_offsets.size(), default_stream); } else { rmm::device_uvector<vertex_t> aggregate_segment_offsets( row_comm_size * segment_offsets.size(), default_stream); row_comm.allgather(segment_offsets.data(), aggregate_segment_offsets.data(), segment_offsets.size(), default_stream); } vertex_partition_segment_offsets_.resize(aggregate_segment_offsets.size()); raft::update_host(vertex_partition_segment_offsets_.data(), aggregate_segment_offsets.data(), aggregate_segment_offsets.size(), default_stream); raft::comms::status_t status{}; if (partition.is_hypergraph_partitioned()) { status = col_comm.sync_stream( default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will // become out-of-scope once control flow exits this block and // vertex_partition_segment_offsets_ can be used right after return. } else { status = row_comm.sync_stream( default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will // become out-of-scope once control flow exits this block and // vertex_partition_segment_offsets_ can be used right after return. } CUGRAPH_EXPECTS(status == raft::comms::status_t::SUCCESS, "sync_stream() failure."); } // optional expensive checks (part 3/3) if (do_expensive_check) { // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. if (!this->is_multigraph()) {} } } template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed, bool multi_gpu> graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<!multi_gpu>>:: graph_t(raft::handle_t const &handle, edgelist_t<vertex_t, edge_t, weight_t> const &edgelist, vertex_t number_of_vertices, graph_properties_t properties, bool sorted_by_degree, bool do_expensive_check) : detail::graph_base_t<vertex_t, edge_t, weight_t>( handle, number_of_vertices, edgelist.number_of_edges, properties), offsets_(rmm::device_uvector<edge_t>(0, handle.get_stream())), indices_(rmm::device_uvector<vertex_t>(0, handle.get_stream())), weights_(rmm::device_uvector<weight_t>(0, handle.get_stream())) { // cheap error checks auto default_stream = this->get_handle_ptr()->get_stream(); CUGRAPH_EXPECTS( (edgelist.p_src_vertices != nullptr) && (edgelist.p_dst_vertices != nullptr), "Invalid API parameter: edgelist.p_src_vertices and edgelist.p_dst_vertices should " "not be nullptr."); // optional expensive checks (part 1/2) if (do_expensive_check) { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices, store_transposed ? 
edgelist.p_src_vertices : edgelist.p_dst_vertices)); // better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved CUGRAPH_EXPECTS(thrust::count_if( rmm::exec_policy(default_stream)->on(default_stream), edge_first, edge_first + edgelist.number_of_edges, out_of_range_t<vertex_t>{ 0, this->get_number_of_vertices(), 0, this->get_number_of_vertices()}) == 0, "Invalid API parameter: edgelist have out-of-range values."); // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. if (!this->is_multigraph()) {} } // convert edge list (COO) to compressed sparse format (CSR or CSC) std::tie(offsets_, indices_, weights_) = edge_list_to_compressed_sparse<store_transposed>(*(this->get_handle_ptr()), edgelist, vertex_t{0}, this->get_number_of_vertices(), vertex_t{0}, this->get_number_of_vertices()); // update degree-based segment offsets (to be used for graph analytics kernel optimization) if (sorted_by_degree) { auto degree_first = thrust::make_transform_iterator( thrust::make_counting_iterator(vertex_t{0}), detail::degree_from_offsets_t<vertex_t, edge_t>{offsets_.data()}); // optional expensive checks (part 2/2) if (do_expensive_check) { CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream), degree_first, degree_first + this->get_number_of_vertices(), thrust::greater<edge_t>{}), "Invalid API parameter: sorted_by_degree is set to true, but degrees are not " "non-ascending."); } static_assert(detail::num_segments_per_vertex_partition == 3); static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) && (detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max())); rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1, default_stream); std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::low_degree_threshold), static_cast<edge_t>(detail::mid_degree_threshold)}; raft::update_device( d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream); rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1, default_stream); segment_offsets.set_element_async(0, 0, default_stream); segment_offsets.set_element_async( detail::num_segments_per_vertex_partition, this->get_number_of_vertices(), default_stream); thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream), degree_first, degree_first + this->get_number_of_vertices(), d_thresholds.begin(), d_thresholds.end(), segment_offsets.begin() + 1); segment_offsets_.resize(segment_offsets.size()); raft::update_host( segment_offsets_.data(), segment_offsets.data(), segment_offsets.size(), default_stream); CUDA_TRY(hipStreamSynchronize( default_stream)); // this is necessary as d_thresholds and segment_offsets will become // out-of-scpe once control flow exits this block and segment_offsets_ can // be used right after return. } // optional expensive checks (part 3/3) if (do_expensive_check) { // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. 
if (!this->is_multigraph()) {} } } // explicit instantiation template class graph_t<int32_t, int32_t, float, true, true>; template class graph_t<int32_t, int32_t, float, false, true>; template class graph_t<int32_t, int32_t, double, true, true>; template class graph_t<int32_t, int32_t, double, false, true>; template class graph_t<int32_t, int64_t, float, true, true>; template class graph_t<int32_t, int64_t, float, false, true>; template class graph_t<int32_t, int64_t, double, true, true>; template class graph_t<int32_t, int64_t, double, false, true>; template class graph_t<int64_t, int64_t, float, true, true>; template class graph_t<int64_t, int64_t, float, false, true>; template class graph_t<int64_t, int64_t, double, true, true>; template class graph_t<int64_t, int64_t, double, false, true>; // template class graph_t<int32_t, int32_t, float, true, false>; template class graph_t<int32_t, int32_t, float, false, false>; template class graph_t<int32_t, int32_t, double, true, false>; template class graph_t<int32_t, int32_t, double, false, false>; template class graph_t<int32_t, int64_t, float, true, false>; template class graph_t<int32_t, int64_t, float, false, false>; template class graph_t<int32_t, int64_t, double, true, false>; template class graph_t<int32_t, int64_t, double, false, false>; template class graph_t<int64_t, int64_t, float, true, false>; template class graph_t<int64_t, int64_t, float, false, false>; template class graph_t<int64_t, int64_t, double, true, false>; template class graph_t<int64_t, int64_t, double, false, false>; } // namespace experimental } // namespace cugraph
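// A minimal, standalone sketch of the degree-segmentation step used in the graph_t
// constructors above: locate segment boundaries in a sorted degree array with the
// vectorized thrust::upper_bound. The degrees and threshold values below are
// illustrative only (they are not cuGraph's detail:: constants), and the array is kept
// ascending so the default comparator applies.
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> h_degrees = {1, 1, 2, 3, 7, 9, 20, 33, 120, 500};  // ascending
  std::vector<int> h_thresholds = {4, 32};  // hypothetical low/mid degree cut-offs

  thrust::device_vector<int> degrees(h_degrees.begin(), h_degrees.end());
  thrust::device_vector<int> thresholds(h_thresholds.begin(), h_thresholds.end());
  thrust::device_vector<int> boundaries(thresholds.size());

  // boundaries[i] = index of the first degree strictly greater than thresholds[i]
  thrust::upper_bound(degrees.begin(), degrees.end(),
                      thresholds.begin(), thresholds.end(),
                      boundaries.begin());

  std::vector<int> h_boundaries(boundaries.size());
  thrust::copy(boundaries.begin(), boundaries.end(), h_boundaries.begin());
  // segments: [0, b0) low-degree, [b0, b1) mid-degree, [b1, N) high-degree
  std::printf("segment boundaries: %d %d\n", h_boundaries[0], h_boundaries[1]);
  return 0;
}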
bd6832b97d2391c677d5c9d12f5814cccf734d42.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <experimental/detail/graph_utils.cuh> #include <experimental/graph.hpp> #include <partition_manager.hpp> #include <utilities/comm_utils.cuh> #include <utilities/error.hpp> #include <rmm/thrust_rmm_allocator.h> #include <raft/device_atomics.cuh> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/adjacent_difference.h> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include <algorithm> #include <tuple> namespace cugraph { namespace experimental { namespace { // can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an // extended __device__ lambda must allow its address to be taken) template <typename vertex_t> struct out_of_range_t { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; __device__ bool operator()(thrust::tuple<vertex_t, vertex_t> t) { auto major = thrust::get<0>(t); auto minor = thrust::get<1>(t); return (major < major_first) || (major >= major_last) || (minor < minor_first) || (minor >= minor_last); } }; template <bool store_transposed, typename vertex_t, typename edge_t, typename weight_t> std:: tuple<rmm::device_uvector<edge_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>> edge_list_to_compressed_sparse(raft::handle_t const &handle, edgelist_t<vertex_t, edge_t, weight_t> const &edgelist, vertex_t major_first, vertex_t major_last, vertex_t minor_first, vertex_t minor_last) { rmm::device_uvector<edge_t> offsets((major_last - major_first) + 1, handle.get_stream()); rmm::device_uvector<vertex_t> indices(edgelist.number_of_edges, handle.get_stream()); rmm::device_uvector<weight_t> weights( edgelist.p_edge_weights != nullptr ? edgelist.number_of_edges : 0, handle.get_stream()); thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), offsets.begin(), offsets.end(), edge_t{0}); thrust::fill(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), indices.begin(), indices.end(), vertex_t{0}); // FIXME: need to performance test this code with R-mat graphs having highly-skewed degree // distribution. If there is a small number of vertices with very large degrees, atomicAdd can // sequentialize execution. CUDA9+ & Kepler+ provide complier/architectural optimizations to // mitigate this impact // (https://developer.nvidia.com/blog/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/), // and we need to check this thrust::for_each based approach delivers the expected performance. // FIXME: also need to verify this approach is at least not significantly slower than the sorting // based approach (this approach does not use extra memory, so better stick to this approach // unless performance is significantly worse). 
auto p_offsets = offsets.data(); auto p_indices = indices.data(); auto p_weights = edgelist.p_edge_weights != nullptr ? weights.data() : static_cast<weight_t *>(nullptr); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices, store_transposed ? edgelist.p_dst_vertices + edgelist.number_of_edges : edgelist.p_src_vertices + edgelist.number_of_edges, [p_offsets, major_first] __device__(auto v) { atomicAdd(p_offsets + (v - major_first), edge_t{1}); }); thrust::exclusive_scan(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), offsets.begin(), offsets.end(), offsets.begin()); if (edgelist.p_edge_weights != nullptr) { auto edge_first = thrust::make_zip_iterator(thrust::make_tuple( edgelist.p_src_vertices, edgelist.p_dst_vertices, edgelist.p_edge_weights)); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), edge_first, edge_first + edgelist.number_of_edges, [p_offsets, p_indices, p_weights, major_first] __device__(auto e) { auto s = thrust::get<0>(e); auto d = thrust::get<1>(e); auto w = thrust::get<2>(e); auto major = store_transposed ? d : s; auto minor = store_transposed ? s : d; auto start = p_offsets[major - major_first]; auto degree = p_offsets[(major - major_first) + 1] - start; auto idx = atomicAdd(p_indices + (start + degree - 1), vertex_t{1}); // use the last element as a counter // FIXME: we can actually store minor - minor_first instead of minor to save // memory if minor can be larger than 32 bit but minor - minor_first fits // within 32 bit p_indices[start + idx] = minor; // overwrite the counter only if idx == degree - 1 (no race) p_weights[start + idx] = w; }); } else { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist.p_src_vertices, edgelist.p_dst_vertices)); thrust::for_each(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), edge_first, edge_first + edgelist.number_of_edges, [p_offsets, p_indices, p_weights, major_first] __device__(auto e) { auto s = thrust::get<0>(e); auto d = thrust::get<1>(e); auto major = store_transposed ? d : s; auto minor = store_transposed ? 
s : d; auto start = p_offsets[major - major_first]; auto degree = p_offsets[(major - major_first) + 1] - start; auto idx = atomicAdd(p_indices + (start + degree - 1), vertex_t{1}); // use the last element as a counter // FIXME: we can actually store minor - minor_first instead of minor to save // memory if minor can be larger than 32 bit but minor - minor_first fits // within 32 bit p_indices[start + idx] = minor; // overwrite the counter only if idx == degree - 1 (no race) }); } // FIXME: need to add an option to sort neighbor lists return std::make_tuple(std::move(offsets), std::move(indices), std::move(weights)); } template <typename vertex_t, typename DegreeIterator, typename ThresholdIterator> std::vector<vertex_t> segment_degree_sorted_vertex_partition(raft::handle_t const &handle, DegreeIterator degree_first, DegreeIterator degree_last, ThresholdIterator threshold_first, ThresholdIterator threshold_last) { auto num_elements = thrust::distance(degree_first, degree_last); auto num_segments = thrust::distance(threshold_first, threshold_last) + 1; std::vector<vertex_t> h_segment_offsets(num_segments + 1); h_segment_offsets[0] = 0; h_segment_offsets.back() = num_elements; rmm::device_uvector<vertex_t> d_segment_offsets(num_segments - 1, handle.get_stream()); thrust::upper_bound(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), degree_first, degree_last, threshold_first, threshold_last, d_segment_offsets.begin()); raft::update_host(h_segment_offsets.begin() + 1, d_segment_offsets.begin(), d_segment_offsets.size(), handle.get_stream()); CUDA_TRY(cudaStreamSynchronize( handle.get_stream())); // this is necessary as d_segment_offsets will become out-of-scope once // this function returns and this function returns a host variable which // can be used right after return. 
return h_segment_offsets; } } // namespace template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed, bool multi_gpu> graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>:: graph_t(raft::handle_t const &handle, std::vector<edgelist_t<vertex_t, edge_t, weight_t>> const &edgelists, partition_t<vertex_t> const &partition, vertex_t number_of_vertices, edge_t number_of_edges, graph_properties_t properties, bool sorted_by_global_degree_within_vertex_partition, bool do_expensive_check) : detail::graph_base_t<vertex_t, edge_t, weight_t>( handle, number_of_vertices, number_of_edges, properties), partition_(partition) { // cheap error checks auto &comm = this->get_handle_ptr()->get_comms(); auto const comm_size = comm.get_size(); auto &row_comm = this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().row_name()); auto const row_comm_rank = row_comm.get_rank(); auto const row_comm_size = row_comm.get_size(); auto &col_comm = this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); auto const col_comm_rank = col_comm.get_rank(); auto const col_comm_size = col_comm.get_size(); auto default_stream = this->get_handle_ptr()->get_stream(); CUGRAPH_EXPECTS(edgelists.size() > 0, "Invalid API parameter: edgelists.size() should be non-zero."); bool is_weighted = edgelists[0].p_edge_weights != nullptr; CUGRAPH_EXPECTS( std::any_of(edgelists.begin() + 1, edgelists.end(), [is_weighted](auto edgelist) { return (edgelist.p_src_vertices == nullptr) || (edgelist.p_dst_vertices == nullptr) || (is_weighted && (edgelist.p_edge_weights == nullptr)) || (!is_weighted && (edgelist.p_edge_weights != nullptr)); }) == false, "Invalid API parameter: edgelists[].p_src_vertices and edgelists[].p_dst_vertices should not " "be nullptr and edgelists[].p_edge_weights should be nullptr (if edgelists[0].p_edge_weights " "is nullptr) or should not be nullptr (otherwise)."); CUGRAPH_EXPECTS((partition.is_hypergraph_partitioned() && (edgelists.size() == static_cast<size_t>(col_comm_size))) || (!(partition.is_hypergraph_partitioned()) && (edgelists.size() == 1)), "Invalid API parameter: errneous edgelists.size()."); // optional expensive checks (part 1/3) if (do_expensive_check) { edge_t number_of_local_edges_sum{}; for (size_t i = 0; i < edgelists.size(); ++i) { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i); std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range(); number_of_local_edges_sum += edgelists[i].number_of_edges; auto edge_first = thrust::make_zip_iterator(thrust::make_tuple( store_transposed ? edgelists[i].p_dst_vertices : edgelists[i].p_src_vertices, store_transposed ? 
edgelists[i].p_src_vertices : edgelists[i].p_dst_vertices)); // better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(default_stream)->on(default_stream), edge_first, edge_first + edgelists[i].number_of_edges, out_of_range_t<vertex_t>{ major_first, major_last, minor_first, minor_last}) == 0, "Invalid API parameter: edgelists[] have out-of-range values."); } number_of_local_edges_sum = host_scalar_allreduce(comm, number_of_local_edges_sum, default_stream); CUGRAPH_EXPECTS(number_of_local_edges_sum == this->get_number_of_edges(), "Invalid API parameter: the sum of local edges doe counts not match with " "number_of_local_edges."); CUGRAPH_EXPECTS( partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices, "Invalid API parameter: vertex partition should cover [0, number_of_vertices)."); } // convert edge list (COO) to compressed sparse format (CSR or CSC) adj_matrix_partition_offsets_.reserve(edgelists.size()); adj_matrix_partition_indices_.reserve(edgelists.size()); adj_matrix_partition_weights_.reserve(is_weighted ? edgelists.size() : 0); for (size_t i = 0; i < edgelists.size(); ++i) { vertex_t major_first{}; vertex_t major_last{}; vertex_t minor_first{}; vertex_t minor_last{}; std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i); std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range(); rmm::device_uvector<edge_t> offsets(0, default_stream); rmm::device_uvector<vertex_t> indices(0, default_stream); rmm::device_uvector<weight_t> weights(0, default_stream); std::tie(offsets, indices, weights) = edge_list_to_compressed_sparse<store_transposed>( *(this->get_handle_ptr()), edgelists[i], major_first, major_last, minor_first, minor_last); adj_matrix_partition_offsets_.push_back(std::move(offsets)); adj_matrix_partition_indices_.push_back(std::move(indices)); if (is_weighted) { adj_matrix_partition_weights_.push_back(std::move(weights)); } } // update degree-based segment offsets (to be used for graph analytics kernel optimization) if (sorted_by_global_degree_within_vertex_partition) { auto degrees = detail::compute_major_degree( *(this->get_handle_ptr()), adj_matrix_partition_offsets_, partition_); // optional expensive checks (part 2/3) if (do_expensive_check) { CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream), degrees.begin(), degrees.end(), thrust::greater<edge_t>{}), "Invalid API parameter: sorted_by_global_degree_within_vertex_partition is " "set to true, but degrees are not non-ascending."); } static_assert(detail::num_segments_per_vertex_partition == 3); static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) && (detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max())); rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1, default_stream); std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::low_degree_threshold), static_cast<edge_t>(detail::mid_degree_threshold)}; raft::update_device( d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream); rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1, default_stream); segment_offsets.set_element_async(0, 0, default_stream); segment_offsets.set_element_async( detail::num_segments_per_vertex_partition, degrees.size(), default_stream); thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream), degrees.begin(), 
degrees.end(), d_thresholds.begin(), d_thresholds.end(), segment_offsets.begin() + 1); rmm::device_uvector<vertex_t> aggregate_segment_offsets(0, default_stream); if (partition.is_hypergraph_partitioned()) { rmm::device_uvector<vertex_t> aggregate_segment_offsets( col_comm_size * segment_offsets.size(), default_stream); col_comm.allgather(segment_offsets.data(), aggregate_segment_offsets.data(), segment_offsets.size(), default_stream); } else { rmm::device_uvector<vertex_t> aggregate_segment_offsets( row_comm_size * segment_offsets.size(), default_stream); row_comm.allgather(segment_offsets.data(), aggregate_segment_offsets.data(), segment_offsets.size(), default_stream); } vertex_partition_segment_offsets_.resize(aggregate_segment_offsets.size()); raft::update_host(vertex_partition_segment_offsets_.data(), aggregate_segment_offsets.data(), aggregate_segment_offsets.size(), default_stream); raft::comms::status_t status{}; if (partition.is_hypergraph_partitioned()) { status = col_comm.sync_stream( default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will // become out-of-scope once control flow exits this block and // vertex_partition_segment_offsets_ can be used right after return. } else { status = row_comm.sync_stream( default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will // become out-of-scope once control flow exits this block and // vertex_partition_segment_offsets_ can be used right after return. } CUGRAPH_EXPECTS(status == raft::comms::status_t::SUCCESS, "sync_stream() failure."); } // optional expensive checks (part 3/3) if (do_expensive_check) { // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. if (!this->is_multigraph()) {} } } template <typename vertex_t, typename edge_t, typename weight_t, bool store_transposed, bool multi_gpu> graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<!multi_gpu>>:: graph_t(raft::handle_t const &handle, edgelist_t<vertex_t, edge_t, weight_t> const &edgelist, vertex_t number_of_vertices, graph_properties_t properties, bool sorted_by_degree, bool do_expensive_check) : detail::graph_base_t<vertex_t, edge_t, weight_t>( handle, number_of_vertices, edgelist.number_of_edges, properties), offsets_(rmm::device_uvector<edge_t>(0, handle.get_stream())), indices_(rmm::device_uvector<vertex_t>(0, handle.get_stream())), weights_(rmm::device_uvector<weight_t>(0, handle.get_stream())) { // cheap error checks auto default_stream = this->get_handle_ptr()->get_stream(); CUGRAPH_EXPECTS( (edgelist.p_src_vertices != nullptr) && (edgelist.p_dst_vertices != nullptr), "Invalid API parameter: edgelist.p_src_vertices and edgelist.p_dst_vertices should " "not be nullptr."); // optional expensive checks (part 1/2) if (do_expensive_check) { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices, store_transposed ? 
edgelist.p_src_vertices : edgelist.p_dst_vertices)); // better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved CUGRAPH_EXPECTS(thrust::count_if( rmm::exec_policy(default_stream)->on(default_stream), edge_first, edge_first + edgelist.number_of_edges, out_of_range_t<vertex_t>{ 0, this->get_number_of_vertices(), 0, this->get_number_of_vertices()}) == 0, "Invalid API parameter: edgelist have out-of-range values."); // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. if (!this->is_multigraph()) {} } // convert edge list (COO) to compressed sparse format (CSR or CSC) std::tie(offsets_, indices_, weights_) = edge_list_to_compressed_sparse<store_transposed>(*(this->get_handle_ptr()), edgelist, vertex_t{0}, this->get_number_of_vertices(), vertex_t{0}, this->get_number_of_vertices()); // update degree-based segment offsets (to be used for graph analytics kernel optimization) if (sorted_by_degree) { auto degree_first = thrust::make_transform_iterator( thrust::make_counting_iterator(vertex_t{0}), detail::degree_from_offsets_t<vertex_t, edge_t>{offsets_.data()}); // optional expensive checks (part 2/2) if (do_expensive_check) { CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream), degree_first, degree_first + this->get_number_of_vertices(), thrust::greater<edge_t>{}), "Invalid API parameter: sorted_by_degree is set to true, but degrees are not " "non-ascending."); } static_assert(detail::num_segments_per_vertex_partition == 3); static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) && (detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max())); rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1, default_stream); std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::low_degree_threshold), static_cast<edge_t>(detail::mid_degree_threshold)}; raft::update_device( d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream); rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1, default_stream); segment_offsets.set_element_async(0, 0, default_stream); segment_offsets.set_element_async( detail::num_segments_per_vertex_partition, this->get_number_of_vertices(), default_stream); thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream), degree_first, degree_first + this->get_number_of_vertices(), d_thresholds.begin(), d_thresholds.end(), segment_offsets.begin() + 1); segment_offsets_.resize(segment_offsets.size()); raft::update_host( segment_offsets_.data(), segment_offsets.data(), segment_offsets.size(), default_stream); CUDA_TRY(cudaStreamSynchronize( default_stream)); // this is necessary as d_thresholds and segment_offsets will become // out-of-scpe once control flow exits this block and segment_offsets_ can // be used right after return. } // optional expensive checks (part 3/3) if (do_expensive_check) { // FIXME: check for symmetricity may better be implemetned with transpose(). if (this->is_symmetric()) {} // FIXME: check for duplicate edges may better be implemented after deciding whether to sort // neighbor list or not. 
if (!this->is_multigraph()) {} } } // explicit instantiation template class graph_t<int32_t, int32_t, float, true, true>; template class graph_t<int32_t, int32_t, float, false, true>; template class graph_t<int32_t, int32_t, double, true, true>; template class graph_t<int32_t, int32_t, double, false, true>; template class graph_t<int32_t, int64_t, float, true, true>; template class graph_t<int32_t, int64_t, float, false, true>; template class graph_t<int32_t, int64_t, double, true, true>; template class graph_t<int32_t, int64_t, double, false, true>; template class graph_t<int64_t, int64_t, float, true, true>; template class graph_t<int64_t, int64_t, float, false, true>; template class graph_t<int64_t, int64_t, double, true, true>; template class graph_t<int64_t, int64_t, double, false, true>; // template class graph_t<int32_t, int32_t, float, true, false>; template class graph_t<int32_t, int32_t, float, false, false>; template class graph_t<int32_t, int32_t, double, true, false>; template class graph_t<int32_t, int32_t, double, false, false>; template class graph_t<int32_t, int64_t, float, true, false>; template class graph_t<int32_t, int64_t, float, false, false>; template class graph_t<int32_t, int64_t, double, true, false>; template class graph_t<int32_t, int64_t, double, false, false>; template class graph_t<int64_t, int64_t, float, true, false>; template class graph_t<int64_t, int64_t, float, false, false>; template class graph_t<int64_t, int64_t, double, true, false>; template class graph_t<int64_t, int64_t, double, false, false>; } // namespace experimental } // namespace cugraph
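// A reduced sketch of the count-then-scan step at the heart of
// edge_list_to_compressed_sparse above: atomically count the degree of each major
// vertex, then exclusive-scan the counts into CSR offsets. It assumes int vertex ids
// already shifted to [0, num_majors) and requires nvcc's extended-lambda support
// (--extended-lambda); it is an illustration, not the cuGraph implementation.
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/scan.h>

thrust::device_vector<int> coo_majors_to_csr_offsets(
  thrust::device_vector<int> const& majors, int num_majors)
{
  thrust::device_vector<int> offsets(num_majors + 1, 0);
  int* p_offsets = thrust::raw_pointer_cast(offsets.data());

  // per-major degree counts (atomicAdd serializes only on identical majors)
  thrust::for_each(majors.begin(), majors.end(),
                   [p_offsets] __device__(int v) { atomicAdd(p_offsets + v, 1); });

  // prefix-sum the counts into row offsets; offsets[num_majors] ends up equal to nnz
  thrust::exclusive_scan(offsets.begin(), offsets.end(), offsets.begin());
  return offsets;
}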
2dd6b01560c75f960ae6e136ab87f0f3472ac6db.hip
// !!! This is a file automatically generated by hipify!!! #include "THHTensorMath.h" #include "THHGeneral.h" #include "THHTensorCopy.h" #include "THHApply.cuh" #include "THHNumerics.cuh" #include "THHTensorMath.cuh" #include "THHThrustAllocator.cuh" #include "THHTensor.hpp" #include "THHStream.hpp" #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #if TORCH_HIP_VERSION >= 7000 #include <thrust/system/hip/execution_policy.h> #endif #include <cfloat> template <typename T> struct TensorFillOp { TensorFillOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* v) { *v = val; } const T val; }; // copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type, difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) {} __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; // type of the strided_range iterator typedef PermutationIterator iterator; // construct strided_range for the range [first,last) strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) {} iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: Iterator first; Iterator last; difference_type stride; }; struct idx_functor { int64_t div; int64_t size; __host__ __device__ idx_functor(int64_t div, int64_t size) : div(div), size(size) {} __host__ __device__ int64_t operator()(int64_t val) { return (val / div) % size + TH_INDEX_BASE; } }; template <typename T> struct NonZeroOp { NonZeroOp() {} __host__ __device__ bool operator()(T lhs) const { if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) { return true; } else { return false; } } }; template<typename T, typename accT = T> struct LinspaceOp { __host__ __device__ LinspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = THCNumerics<accT>::mul(step_, ScalarConvert<ptrdiff_t,accT>::to(index)); accT value = THCNumerics<accT>::add(start_, increment); return ScalarConvert<accT,T>::to(value); } const accT start_, step_; }; template<typename T, typename accT = T> struct LogspaceOp { __host__ __device__ LogspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = THCNumerics<accT>::mul(step_, ScalarConvert<ptrdiff_t,accT>::to(index)); accT value = THCNumerics<accT>::exp10(THCNumerics<accT>::add(start_, increment)); return ScalarConvert<accT,T>::to(value); } const accT start_, step_; }; #include "generic/THCTensorMath.cu" #include "THHGenerateAllTypes.h"
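// A small sketch of how an index-to-value functor like LinspaceOp above is typically
// driven: thrust::tabulate calls the functor with each output index. The element count
// and value range below are illustrative.
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <cstddef>

struct linspace_op {
  float start;
  float step;
  __host__ __device__ float operator()(std::ptrdiff_t index) const {
    return start + step * static_cast<float>(index);
  }
};

int main() {
  const int n = 5;
  thrust::device_vector<float> out(n);
  // n evenly spaced points from 0 to 1 inclusive: step = (1 - 0) / (n - 1)
  thrust::tabulate(out.begin(), out.end(), linspace_op{0.0f, 1.0f / (n - 1)});
  return 0;
}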
2dd6b01560c75f960ae6e136ab87f0f3472ac6db.cu
#include "THCTensorMath.h" #include "THCGeneral.h" #include "THCTensorCopy.h" #include "THCApply.cuh" #include "THCNumerics.cuh" #include "THCTensorMath.cuh" #include "THCThrustAllocator.cuh" #include "THCTensor.hpp" #include "THCStream.hpp" #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #if CUDA_VERSION >= 7000 #include <thrust/system/cuda/execution_policy.h> #endif #include <cfloat> template <typename T> struct TensorFillOp { TensorFillOp(T v) : val(v) {} __device__ __forceinline__ void operator()(T* v) { *v = val; } const T val; }; // copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type, difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) {} __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; // type of the strided_range iterator typedef PermutationIterator iterator; // construct strided_range for the range [first,last) strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) {} iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: Iterator first; Iterator last; difference_type stride; }; struct idx_functor { int64_t div; int64_t size; __host__ __device__ idx_functor(int64_t div, int64_t size) : div(div), size(size) {} __host__ __device__ int64_t operator()(int64_t val) { return (val / div) % size + TH_INDEX_BASE; } }; template <typename T> struct NonZeroOp { NonZeroOp() {} __host__ __device__ bool operator()(T lhs) const { if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) { return true; } else { return false; } } }; template<typename T, typename accT = T> struct LinspaceOp { __host__ __device__ LinspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = THCNumerics<accT>::mul(step_, ScalarConvert<ptrdiff_t,accT>::to(index)); accT value = THCNumerics<accT>::add(start_, increment); return ScalarConvert<accT,T>::to(value); } const accT start_, step_; }; template<typename T, typename accT = T> struct LogspaceOp { __host__ __device__ LogspaceOp(accT start, accT step): start_(start), step_(step) { } __device__ __forceinline__ T operator()(ptrdiff_t index) { accT increment = THCNumerics<accT>::mul(step_, ScalarConvert<ptrdiff_t,accT>::to(index)); accT value = THCNumerics<accT>::exp10(THCNumerics<accT>::add(start_, increment)); return ScalarConvert<accT,T>::to(value); } const accT start_, step_; }; #include "generic/THCTensorMath.cu" #include "THCGenerateAllTypes.h"
464df4954784d55b392da2f014e62c63fa6152ec.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cusp/complex.h> #include <cusp/blas.h> #include<cusp/csr_matrix.h> #include<cusp/multiply.h> #include <cusp/array1d.h> #include <cusp/copy.h> #include <thrust/device_ptr.h> #include "mex.h" #include "gpu/mxGPUArray.h" /* Input Arguments */ #define A prhs[0] #define XV prhs[3] /* Output Arguments */ #define Y plhs[0] void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){ const char *Avalm, *Acolm, *Aptrm; const char *Anrow, *Ancol, *Annz; mxGPUArray const *Aval; mxGPUArray const *Acol; mxGPUArray const *Aptr; mxGPUArray const *x; mxGPUArray *y; // fnames[ifield] = mxGetFieldNameByNumber(A,ifield); /* Initialize the MathWorks GPU API. */ mxInitGPU(); /*get matlab pointers*/ //Anrows= mxGetFieldNameByNumber(A,0); //Ancol= mxGetFieldNameByNumber(A,1); //Annz= mxGetFieldNameByNumber(A,2); Avalm= mxGetFieldNameByNumber(A,3); Acolm= mxGetFieldNameByNumber(A,4); Aptrm= mxGetFieldNameByNumber(A,5); /*get matlab variables*/ Aval = mxGPUCreateFromMxArray(Avalm); Acol = mxGPUCreateFromMxArray(Acolm); Aptr = mxGPUCreateFromMxArray(Aptrm); x = mxGPUCreateFromMxArray(XV); int ncol=mxGPUGetNumberOfElements(Acol); int nrowp1=mxGPUGetNumberOfElements(Aptr); int nnz =mxGPUGetNumberOfElements(x); // int nout=nrowp1-1; mxComplexity isXVreal = mxGPUGetComplexity(x); mxComplexity isAreal = mxGPUGetComplexity(Aval); const mwSize ndim= 1; const mwSize dims[]={(mwSize) (nrowp1-1)}; // mxComplexity isYVreal=mxCOMPLEX; // if (isAreal==mxREAL && isXVreal==mxREAL) // isYVreal=mxCOMPLEX; if (isAreal!=isXVreal) { mexErrMsgTxt("Aval and X must have the same complexity"); return; } if(mxGPUGetClassID(Aval) != mxSINGLE_CLASS|| mxGPUGetClassID(x)!= mxSINGLE_CLASS|| mxGPUGetClassID(Aptr)!= mxINT32_CLASS|| mxGPUGetClassID(Acol)!= mxINT32_CLASS){ mexErrMsgTxt("usage: gspmv(single, int32, int32, single )"); return; } //create output vector y = mxGPUCreateGPUArray(ndim,dims,mxGPUGetClassID(x),isAreal, MX_GPU_DO_NOT_INITIALIZE); /* wrap indices from matlab */ typedef const int TI; /* the type for index */ TI *d_col =(TI *)(mxGPUGetDataReadOnly(Acol)); TI *d_ptr =(TI *)(mxGPUGetDataReadOnly(Aptr)); // wrap with thrust::device_ptr thrust::device_ptr<TI> wrap_d_col (d_col); thrust::device_ptr<TI> wrap_d_ptr (d_ptr); // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TI> > idx2Av; // wrap index arrays idx2Av colIndex (wrap_d_col , wrap_d_col + ncol); idx2Av ptrIndex (wrap_d_ptr , wrap_d_ptr + nrowp1); if (isAreal!=mxREAL){ typedef const cusp::complex<float> TA; /* the type for A */ typedef const cusp::complex<float> TXV; /* the type for X */ typedef cusp::complex<float> TYV; /* the type for Y */ // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av; /* pointers from matlab */ TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval)); TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x)); TYV *d_y =(TYV *)(mxGPUGetData(y)); // wrap with thrust::device_ptr thrust::device_ptr<TA > wrap_d_val (d_val); thrust::device_ptr<TXV > wrap_d_x (d_x); thrust::device_ptr<TYV > wrap_d_y (d_y); // wrap arrays val2Av valIndex (wrap_d_val , wrap_d_val + ncol); x2Av xIndex (wrap_d_x , wrap_d_x + nnz); y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1); // y2Av yIndex(wrap_d_y, wrap_d_y+ nnz); // combine info in CSR matrix typedef 
cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView; DeviceView As(nrowp1-1, nnz, ncol, ptrIndex, colIndex, valIndex); // multiply matrix cusp::multiply(As, xIndex, yIndex); } else{ typedef const float TA; /* the type for A */ typedef const float TXV; /* the type for X */ typedef float TYV; /* the type for Y */ /* pointers from matlab */ TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval)); TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x)); TYV *d_y =(TYV *)(mxGPUGetData(y)); // wrap with thrust::device_ptr! thrust::device_ptr<TA > wrap_d_val (d_val); thrust::device_ptr<TXV > wrap_d_x (d_x); thrust::device_ptr<TYV > wrap_d_y (d_y); // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av; // wrap arrays val2Av valIndex (wrap_d_val , wrap_d_val + ncol); x2Av xIndex (wrap_d_x , wrap_d_x + nnz); //y2Av yIndex(wrap_d_y, wrap_d_y+ nnz); y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1); // combine info in CSR matrix typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView; DeviceView As(nrowp1-1, nnz, ncol, ptrIndex, colIndex, valIndex); // multiply matrix cusp::multiply(As, xIndex, yIndex); } Y = mxGPUCreateMxArrayOnGPU(y); mxGPUDestroyGPUArray(Aval); mxGPUDestroyGPUArray(Aptr); mxGPUDestroyGPUArray(Acol); mxGPUDestroyGPUArray(x); mxGPUDestroyGPUArray(y); return; }
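// A minimal sketch of the zero-copy wrapping pattern the MEX function above applies to
// each mxGPUArray buffer: a raw device pointer becomes a thrust::device_ptr, which a
// cusp::array1d_view can then treat as a cusp vector without copying. The pointer and
// length are placeholders supplied by the caller.
#include <cusp/array1d.h>
#include <thrust/device_ptr.h>
#include <cstddef>

typedef cusp::array1d_view<thrust::device_ptr<float> > float_view;

float_view wrap_device_buffer(float* d_ptr, std::size_t n) {
  thrust::device_ptr<float> wrapped(d_ptr);
  return float_view(wrapped, wrapped + n);
}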
464df4954784d55b392da2f014e62c63fa6152ec.cu
#include <cuda.h> #include <cusp/complex.h> #include <cusp/blas.h> #include<cusp/csr_matrix.h> #include<cusp/multiply.h> #include <cusp/array1d.h> #include <cusp/copy.h> #include <thrust/device_ptr.h> #include "mex.h" #include "gpu/mxGPUArray.h" /* Input Arguments */ #define A prhs[0] #define XV prhs[3] /* Output Arguments */ #define Y plhs[0] void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){ const char *Avalm, *Acolm, *Aptrm; const char *Anrow, *Ancol, *Annz; mxGPUArray const *Aval; mxGPUArray const *Acol; mxGPUArray const *Aptr; mxGPUArray const *x; mxGPUArray *y; // fnames[ifield] = mxGetFieldNameByNumber(A,ifield); /* Initialize the MathWorks GPU API. */ mxInitGPU(); /*get matlab pointers*/ //Anrows= mxGetFieldNameByNumber(A,0); //Ancol= mxGetFieldNameByNumber(A,1); //Annz= mxGetFieldNameByNumber(A,2); Avalm= mxGetFieldNameByNumber(A,3); Acolm= mxGetFieldNameByNumber(A,4); Aptrm= mxGetFieldNameByNumber(A,5); /*get matlab variables*/ Aval = mxGPUCreateFromMxArray(Avalm); Acol = mxGPUCreateFromMxArray(Acolm); Aptr = mxGPUCreateFromMxArray(Aptrm); x = mxGPUCreateFromMxArray(XV); int ncol=mxGPUGetNumberOfElements(Acol); int nrowp1=mxGPUGetNumberOfElements(Aptr); int nnz =mxGPUGetNumberOfElements(x); // int nout=nrowp1-1; mxComplexity isXVreal = mxGPUGetComplexity(x); mxComplexity isAreal = mxGPUGetComplexity(Aval); const mwSize ndim= 1; const mwSize dims[]={(mwSize) (nrowp1-1)}; // mxComplexity isYVreal=mxCOMPLEX; // if (isAreal==mxREAL && isXVreal==mxREAL) // isYVreal=mxCOMPLEX; if (isAreal!=isXVreal) { mexErrMsgTxt("Aval and X must have the same complexity"); return; } if(mxGPUGetClassID(Aval) != mxSINGLE_CLASS|| mxGPUGetClassID(x)!= mxSINGLE_CLASS|| mxGPUGetClassID(Aptr)!= mxINT32_CLASS|| mxGPUGetClassID(Acol)!= mxINT32_CLASS){ mexErrMsgTxt("usage: gspmv(single, int32, int32, single )"); return; } //create output vector y = mxGPUCreateGPUArray(ndim,dims,mxGPUGetClassID(x),isAreal, MX_GPU_DO_NOT_INITIALIZE); /* wrap indices from matlab */ typedef const int TI; /* the type for index */ TI *d_col =(TI *)(mxGPUGetDataReadOnly(Acol)); TI *d_ptr =(TI *)(mxGPUGetDataReadOnly(Aptr)); // wrap with thrust::device_ptr thrust::device_ptr<TI> wrap_d_col (d_col); thrust::device_ptr<TI> wrap_d_ptr (d_ptr); // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TI> > idx2Av; // wrap index arrays idx2Av colIndex (wrap_d_col , wrap_d_col + ncol); idx2Av ptrIndex (wrap_d_ptr , wrap_d_ptr + nrowp1); if (isAreal!=mxREAL){ typedef const cusp::complex<float> TA; /* the type for A */ typedef const cusp::complex<float> TXV; /* the type for X */ typedef cusp::complex<float> TYV; /* the type for Y */ // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av; /* pointers from matlab */ TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval)); TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x)); TYV *d_y =(TYV *)(mxGPUGetData(y)); // wrap with thrust::device_ptr thrust::device_ptr<TA > wrap_d_val (d_val); thrust::device_ptr<TXV > wrap_d_x (d_x); thrust::device_ptr<TYV > wrap_d_y (d_y); // wrap arrays val2Av valIndex (wrap_d_val , wrap_d_val + ncol); x2Av xIndex (wrap_d_x , wrap_d_x + nnz); y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1); // y2Av yIndex(wrap_d_y, wrap_d_y+ nnz); // combine info in CSR matrix typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView; DeviceView As(nrowp1-1, nnz, ncol, 
ptrIndex, colIndex, valIndex); // multiply matrix cusp::multiply(As, xIndex, yIndex); } else{ typedef const float TA; /* the type for A */ typedef const float TXV; /* the type for X */ typedef float TYV; /* the type for Y */ /* pointers from matlab */ TA *d_val =(TA *)(mxGPUGetDataReadOnly(Aval)); TXV *d_x =(TXV *)(mxGPUGetDataReadOnly(x)); TYV *d_y =(TYV *)(mxGPUGetData(y)); // wrap with thrust::device_ptr! thrust::device_ptr<TA > wrap_d_val (d_val); thrust::device_ptr<TXV > wrap_d_x (d_x); thrust::device_ptr<TYV > wrap_d_y (d_y); // wrap with array1d_view typedef typename cusp::array1d_view< thrust::device_ptr<TA > > val2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TXV > > x2Av; typedef typename cusp::array1d_view< thrust::device_ptr<TYV > > y2Av; // wrap arrays val2Av valIndex (wrap_d_val , wrap_d_val + ncol); x2Av xIndex (wrap_d_x , wrap_d_x + nnz); //y2Av yIndex(wrap_d_y, wrap_d_y+ nnz); y2Av yIndex(wrap_d_y, wrap_d_y+ nrowp1-1); // combine info in CSR matrix typedef cusp::csr_matrix_view<idx2Av,idx2Av,val2Av> DeviceView; DeviceView As(nrowp1-1, nnz, ncol, ptrIndex, colIndex, valIndex); // multiply matrix cusp::multiply(As, xIndex, yIndex); } Y = mxGPUCreateMxArrayOnGPU(y); mxGPUDestroyGPUArray(Aval); mxGPUDestroyGPUArray(Aptr); mxGPUDestroyGPUArray(Acol); mxGPUDestroyGPUArray(x); mxGPUDestroyGPUArray(y); return; }
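// A standalone sketch of the cusp::multiply call issued by the MEX wrapper above,
// using owning cusp containers instead of views so it can run on its own. The 2x3
// matrix and the vector values are illustrative.
#include <cusp/array1d.h>
#include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include <cusp/print.h>

int main() {
  // A = [[1 0 2], [0 3 0]] stored as CSR (2 rows, 3 columns, 3 nonzeros)
  cusp::csr_matrix<int, float, cusp::device_memory> A(2, 3, 3);
  A.row_offsets[0] = 0; A.row_offsets[1] = 2; A.row_offsets[2] = 3;
  A.column_indices[0] = 0; A.column_indices[1] = 2; A.column_indices[2] = 1;
  A.values[0] = 1.0f; A.values[1] = 2.0f; A.values[2] = 3.0f;

  cusp::array1d<float, cusp::device_memory> x(3, 1.0f);  // x = [1 1 1]
  cusp::array1d<float, cusp::device_memory> y(2, 0.0f);

  cusp::multiply(A, x, y);  // y = A * x = [3 3]
  cusp::print(y);
  return 0;
}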
929c73ba8cd9148673193d82af49ae3133ffa3df.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <ATen/native/sparse/hip/SparseHIPBlas.cuh> #include <TH/THGeneral.h> #include <hipsparse.h> namespace at { namespace native { namespace sparse { namespace cuda { std::string hipsparseGetErrorString(hipsparseStatus_t status) { switch(status) { case HIPSPARSE_STATUS_SUCCESS: return "success"; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "library not initialized"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "resource allocation failed"; case HIPSPARSE_STATUS_INVALID_VALUE: return "an invalid numeric value was used as an argument"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "an absent device architectural feature is required"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "an access to GPU memory space failed"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "the GPU program failed to execute"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "an internal operation failed"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "the matrix type is not supported by this function"; case HIPSPARSE_STATUS_ZERO_PIVOT: return "an entry of the matrix is either structural zero or numerical zero (singular block)"; default: { std::ostringstream oss; oss << "unknown error " << static_cast<int64_t>(status); return oss.str(); } } } inline void CUSPARSE_CHECK(hipsparseStatus_t status) { if (status != HIPSPARSE_STATUS_SUCCESS) { AT_ERROR("cusparse runtime error: ", hipsparseGetErrorString(status)); } } inline hipsparseHandle_t setCUDASparseStream() { hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle(); hipsparseSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); return handle; } void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) { AT_CHECK((m <= INT_MAX) && (nnz <= INT_MAX), "hipsparseXcoo2csr only supports m, nnz with the bound [val] <= ", INT_MAX); auto handle = setCUDASparseStream(); CUSPARSE_CHECK(hipsparseXcoo2csr(handle, coorowind, nnz, m, csrrowptr, HIPSPARSE_INDEX_BASE_ZERO)); } hipsparseOperation_t convertTransToCusparseOperation(char trans) { if (trans == 't') return HIPSPARSE_OPERATION_TRANSPOSE; else if (trans == 'n') return HIPSPARSE_OPERATION_NON_TRANSPOSE; else if (trans == 'c') return HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE; else { AT_ERROR("trans must be one of: t, n, c"); } } void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc) { int transb_ = ((transb == 't') || (transb == 'T')); if(n == 1) *ldc = m; if(transb_) { if(k == 1) *ldb = n; } else { if(n == 1) *ldb = k; } } /* Level 3 */ void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { adjustLd(transb, m, n, k, &ldb, &ldc); hipsparseOperation_t opa = convertTransToCusparseOperation(transa); hipsparseOperation_t opb = convertTransToCusparseOperation(transb); AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX), "hipsparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_nnz = (int)nnz; int i_ldb = (int)ldb; int i_ldc = (int)ldc; auto handle = setCUDASparseStream(); hipsparseMatDescr_t desc; hipsparseCreateMatDescr(&desc); CUSPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, 
&alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc)); } void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { adjustLd(transb, m, n, k, &ldb, &ldc); hipsparseOperation_t opa = convertTransToCusparseOperation(transa); hipsparseOperation_t opb = convertTransToCusparseOperation(transb); AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX), "hipsparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_nnz = (int)nnz; int i_ldb = (int)ldb; int i_ldc = (int)ldc; auto handle = setCUDASparseStream(); hipsparseMatDescr_t desc; hipsparseCreateMatDescr(&desc); CUSPARSE_CHECK(hipsparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc)); // TODO: I think this leaks the matrix descriptor. Proper fix is to create // real descriptor classes } /* format conversion */ void CreateIdentityPermutation(int64_t nnz, int *P) { AT_CHECK((nnz <= INT_MAX), "Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); hipsparseCreateIdentityPermutation(handle, i_nnz, P); } void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(hipsparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes)); } void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcsrsort only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); hipsparseMatDescr_t desc; hipsparseCreateMatDescr(&desc); CUSPARSE_CHECK(hipsparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer)); // TODO: I think this leaks the matrix descriptor. } void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes)); } void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "XcoosortByRow only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(hipsparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer)); } }}}} // namespace at::native::sparse::cuda
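The csrmm2 and csrsort paths above create a hipsparseMatDescr_t and never destroy it, as the TODO notes. A minimal sketch of the "real descriptor class" that TODO suggests, assuming only the default general / zero-based descriptor is needed; the class name is ours, not part of this file:

// Minimal RAII wrapper so every hipsparseCreateMatDescr is paired with a
// hipsparseDestroyMatDescr, even on early error returns.
struct HipSparseMatDescr {
  hipsparseMatDescr_t desc = nullptr;
  HipSparseMatDescr() {
    // default descriptor: general matrix type, zero-based index base
    CUSPARSE_CHECK(hipsparseCreateMatDescr(&desc));
  }
  ~HipSparseMatDescr() { hipsparseDestroyMatDescr(desc); }
  HipSparseMatDescr(const HipSparseMatDescr&) = delete;
  HipSparseMatDescr& operator=(const HipSparseMatDescr&) = delete;
  operator hipsparseMatDescr_t() const { return desc; }
};

// Usage inside Scsrmm2 would then look like:
//   HipSparseMatDescr desc;
//   CUSPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz,
//                                   &alpha, desc, csrvala, csrrowptra, csrcolinda,
//                                   b, i_ldb, &beta, c, i_ldc));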
929c73ba8cd9148673193d82af49ae3133ffa3df.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.cuh> #include <TH/THGeneral.h> #include <cusparse.h> namespace at { namespace native { namespace sparse { namespace cuda { std::string cusparseGetErrorString(cusparseStatus_t status) { switch(status) { case CUSPARSE_STATUS_SUCCESS: return "success"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "library not initialized"; case CUSPARSE_STATUS_ALLOC_FAILED: return "resource allocation failed"; case CUSPARSE_STATUS_INVALID_VALUE: return "an invalid numeric value was used as an argument"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "an absent device architectural feature is required"; case CUSPARSE_STATUS_MAPPING_ERROR: return "an access to GPU memory space failed"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "the GPU program failed to execute"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "an internal operation failed"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "the matrix type is not supported by this function"; case CUSPARSE_STATUS_ZERO_PIVOT: return "an entry of the matrix is either structural zero or numerical zero (singular block)"; default: { std::ostringstream oss; oss << "unknown error " << static_cast<int64_t>(status); return oss.str(); } } } inline void CUSPARSE_CHECK(cusparseStatus_t status) { if (status != CUSPARSE_STATUS_SUCCESS) { AT_ERROR("cusparse runtime error: ", cusparseGetErrorString(status)); } } inline cusparseHandle_t setCUDASparseStream() { cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle(); cusparseSetStream(handle, at::cuda::getCurrentCUDAStream()); return handle; } void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) { AT_CHECK((m <= INT_MAX) && (nnz <= INT_MAX), "cusparseXcoo2csr only supports m, nnz with the bound [val] <= ", INT_MAX); auto handle = setCUDASparseStream(); CUSPARSE_CHECK(cusparseXcoo2csr(handle, coorowind, nnz, m, csrrowptr, CUSPARSE_INDEX_BASE_ZERO)); } cusparseOperation_t convertTransToCusparseOperation(char trans) { if (trans == 't') return CUSPARSE_OPERATION_TRANSPOSE; else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE; else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE; else { AT_ERROR("trans must be one of: t, n, c"); } } void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc) { int transb_ = ((transb == 't') || (transb == 'T')); if(n == 1) *ldc = m; if(transb_) { if(k == 1) *ldb = n; } else { if(n == 1) *ldb = k; } } /* Level 3 */ void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { adjustLd(transb, m, n, k, &ldb, &ldc); cusparseOperation_t opa = convertTransToCusparseOperation(transa); cusparseOperation_t opb = convertTransToCusparseOperation(transb); AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX), "cusparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_nnz = (int)nnz; int i_ldb = (int)ldb; int i_ldc = (int)ldc; auto handle = setCUDASparseStream(); cusparseMatDescr_t desc; cusparseCreateMatDescr(&desc); CUSPARSE_CHECK(cusparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc)); } void Dcsrmm2(char transa, 
char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { adjustLd(transb, m, n, k, &ldb, &ldc); cusparseOperation_t opa = convertTransToCusparseOperation(transa); cusparseOperation_t opb = convertTransToCusparseOperation(transb); AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX), "cusparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_k = (int)k; int i_nnz = (int)nnz; int i_ldb = (int)ldb; int i_ldc = (int)ldc; auto handle = setCUDASparseStream(); cusparseMatDescr_t desc; cusparseCreateMatDescr(&desc); CUSPARSE_CHECK(cusparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc)); // TODO: I think this leaks the matrix descriptor. Proper fix is to create // real descriptor classes } /* format conversion */ void CreateIdentityPermutation(int64_t nnz, int *P) { AT_CHECK((nnz <= INT_MAX), "Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); cusparseCreateIdentityPermutation(handle, i_nnz, P); } void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(cusparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes)); } void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcsrsort only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); cusparseMatDescr_t desc; cusparseCreateMatDescr(&desc); CUSPARSE_CHECK(cusparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer)); // TODO: I think this leaks the matrix descriptor. } void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes)); } void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer) { AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX), "XcoosortByRow only supports m, n, nnz with the bound [val] <= ", INT_MAX); int i_m = (int)m; int i_n = (int)n; int i_nnz = (int)nnz; auto handle = setCUDASparseStream(); CUSPARSE_CHECK(cusparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer)); } }}}} // namespace at::native::sparse::cuda
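As a host-side reference for what the Xcoo2csr wrapper above computes: cusparseXcoo2csr compresses row-sorted COO row indices into a CSR row-pointer array of length m + 1 with zero-based indexing. A minimal sketch useful in unit tests; the function name is ours:

#include <vector>

// csrRowPtr[i] holds the number of nonzeros in rows 0..i-1, so csrRowPtr[0] == 0
// and csrRowPtr[m] == nnz. cooRowInd must already be sorted by row.
std::vector<int> coo2csr_host(const std::vector<int>& cooRowInd, int m) {
  std::vector<int> csrRowPtr(m + 1, 0);
  for (int r : cooRowInd) csrRowPtr[r + 1]++;                    // count entries per row
  for (int i = 0; i < m; ++i) csrRowPtr[i + 1] += csrRowPtr[i];  // prefix sum of the counts
  return csrRowPtr;
}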
6648269fcdf6e3cb909834e9d2ef73ab7b430e55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "reference_calc.cpp" #include "utils.h" __global__ void find_min(const float* const array) { } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ /**************************************************************************** * You can use the code below to help with debugging, but make sure to * * comment it out again before submitting your assignment for grading, * * otherwise this code will take too much time and make it seem like your * * GPU implementation isn't fast enough. * * * * This code generates a reference cdf on the host by running the * * reference calculation we have given you. It then copies your GPU * * generated cdf back to the host and calls a function that compares the * * the two and will output the first location they differ. * * ************************************************************************* */ /* float *h_logLuminance = new float[numRows * numCols]; unsigned int *h_cdf = new unsigned int[numBins]; unsigned int *h_your_cdf = new unsigned int[numBins]; checkCudaErrors(hipMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost)); referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins); //compare the results of the CDF checkResultsExact(h_cdf, h_your_cdf, numBins); delete[] h_logLuminance; delete[] h_cdf; delete[] h_your_cdf; */ }
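Step 1 of the TODO above is a parallel min/max over the logLuminance values. A minimal sketch using a shared-memory tree reduction; the kernel name, the partial-result buffers, and the two-pass structure (per-block partials, then a second pass over the partials) are our choices, not part of the assignment code. A power-of-two block size is assumed.

#include <cfloat>

// Each block writes one partial minimum and one partial maximum; running the
// kernel a second time over the partials (with a single block) gives the global values.
__global__ void minmax_partial(const float* const d_in, float* d_min_out,
                               float* d_max_out, int n)
{
  extern __shared__ float s[];            // 2 * blockDim.x floats
  float* smin = s;
  float* smax = s + blockDim.x;
  int tid = threadIdx.x;
  int i   = blockIdx.x * blockDim.x + tid;

  smin[tid] = (i < n) ? d_in[i] :  FLT_MAX;
  smax[tid] = (i < n) ? d_in[i] : -FLT_MAX;
  __syncthreads();

  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) {
      smin[tid] = fminf(smin[tid], smin[tid + stride]);
      smax[tid] = fmaxf(smax[tid], smax[tid + stride]);
    }
    __syncthreads();
  }
  if (tid == 0) {
    d_min_out[blockIdx.x] = smin[0];
    d_max_out[blockIdx.x] = smax[0];
  }
}

// Launch sketch (d_min_partial / d_max_partial are hypothetical hipMalloc'd buffers):
//   int threads = 256;
//   int blocks  = (numRows * numCols + threads - 1) / threads;
//   hipLaunchKernelGGL(minmax_partial, dim3(blocks), dim3(threads),
//                      2 * threads * sizeof(float), 0,
//                      d_logLuminance, d_min_partial, d_max_partial,
//                      (int)(numRows * numCols));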
6648269fcdf6e3cb909834e9d2ef73ab7b430e55.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "reference_calc.cpp" #include "utils.h" __global__ void find_min(const float* const array) { } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ /**************************************************************************** * You can use the code below to help with debugging, but make sure to * * comment it out again before submitting your assignment for grading, * * otherwise this code will take too much time and make it seem like your * * GPU implementation isn't fast enough. * * * * This code generates a reference cdf on the host by running the * * reference calculation we have given you. It then copies your GPU * * generated cdf back to the host and calls a function that compares the * * the two and will output the first location they differ. * * ************************************************************************* */ /* float *h_logLuminance = new float[numRows * numCols]; unsigned int *h_cdf = new unsigned int[numBins]; unsigned int *h_your_cdf = new unsigned int[numBins]; checkCudaErrors(cudaMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost)); referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins); //compare the results of the CDF checkResultsExact(h_cdf, h_your_cdf, numBins); delete[] h_logLuminance; delete[] h_cdf; delete[] h_your_cdf; */ }
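Steps 3 and 4 of the TODO above are a histogram of the normalized luminance followed by an exclusive scan of that histogram. A minimal sketch, assuming numBins fits in a single thread block (so a one-block Hillis-Steele scan suffices) and that d_histo is a zero-initialized device buffer we allocate ourselves; other histogram/scan strategies are equally valid:

// Step 3: one atomic increment per pixel, using the bin formula from the TODO.
__global__ void histo_kernel(const float* const d_logLum, unsigned int* d_histo,
                             float lumMin, float lumRange, int numBins, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  int bin = (d_logLum[i] - lumMin) / lumRange * numBins;
  if (bin > numBins - 1) bin = numBins - 1;      // the maximum value lands in the last bin
  atomicAdd(&d_histo[bin], 1u);
}

// Step 4: exclusive scan of the histogram. Launch with exactly numBins threads in
// one block and numBins * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ void exclusive_scan_kernel(const unsigned int* d_histo,
                                      unsigned int* const d_cdf, int numBins)
{
  extern __shared__ unsigned int tmp[];
  int tid = threadIdx.x;
  tmp[tid] = (tid > 0) ? d_histo[tid - 1] : 0;   // shift right so the scan is exclusive
  __syncthreads();
  for (int offset = 1; offset < numBins; offset <<= 1) {       // Hillis-Steele scan
    unsigned int val = (tid >= offset) ? tmp[tid - offset] : 0;
    __syncthreads();
    tmp[tid] += val;
    __syncthreads();
  }
  d_cdf[tid] = tmp[tid];
}

// Launch sketch (n = numRows * numCols; lumRange = max_logLum - min_logLum):
//   histo_kernel<<<(n + 255) / 256, 256>>>(d_logLuminance, d_histo,
//                                          min_logLum, lumRange, numBins, n);
//   exclusive_scan_kernel<<<1, numBins, numBins * sizeof(unsigned int)>>>(d_histo, d_cdf, numBins);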
1fa448092aae697ea7885c1d58a0b5f751ea7f76.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include "libwrap.h" #include <dlfcn.h> #include "core.h" typedef enum { SUCCESS = 0 } RetCode; int symbolsLoaded = 0; static RetCode (*nvmlInternalInit)(void); static RetCode (*nvmlInternalShutdown)(void); static RetCode (*nvmlInternalDeviceGetHandleByPciBusId)(const char* pciBusId, uint32_t* device); static RetCode (*nvmlInternalDeviceGetIndex)(uint32_t device, unsigned* index); static RetCode (*nvmlInternalDeviceSetCpuAffinity)(uint32_t device); static RetCode (*nvmlInternalDeviceClearCpuAffinity)(uint32_t device); static const char* (*nvmlInternalErrorString)(RetCode r); ncclResult_t wrapSymbols(void) { if (symbolsLoaded) return ncclSuccess; static void* nvmlhandle = NULL; static void* cuhandle = NULL; void* tmp; void** cast; nvmlhandle=dlopen("libnvidia-ml.so", RTLD_NOW); if (!nvmlhandle) { nvmlhandle=dlopen("libnvidia-ml.so.1", RTLD_NOW); if (!nvmlhandle) { WARN("Failed to open libnvidia-ml.so[.1]"); goto teardown; } } cuhandle = dlopen("libcuda.so", RTLD_NOW); if (!cuhandle) { cuhandle = dlopen("libcuda.so.1", RTLD_NOW); if (!cuhandle) { WARN("Failed to open libcuda.so[.1]"); goto teardown; } } #define LOAD_SYM(handle, symbol, funcptr) do { \ cast = (void**)&funcptr; \ tmp = dlsym(handle, symbol); \ if (tmp == NULL) { \ WARN("dlsym failed on %s - %s", symbol, dlerror()); \ goto teardown; \ } \ *cast = tmp; \ } while (0) LOAD_SYM(nvmlhandle, "nvmlInit", nvmlInternalInit); LOAD_SYM(nvmlhandle, "nvmlShutdown", nvmlInternalShutdown); LOAD_SYM(nvmlhandle, "nvmlDeviceGetHandleByPciBusId", nvmlInternalDeviceGetHandleByPciBusId); LOAD_SYM(nvmlhandle, "nvmlDeviceGetIndex", nvmlInternalDeviceGetIndex); LOAD_SYM(nvmlhandle, "nvmlDeviceSetCpuAffinity", nvmlInternalDeviceSetCpuAffinity); LOAD_SYM(nvmlhandle, "nvmlDeviceClearCpuAffinity", nvmlInternalDeviceClearCpuAffinity); LOAD_SYM(nvmlhandle, 
"nvmlErrorString", nvmlInternalErrorString); symbolsLoaded = 1; return ncclSuccess; teardown: nvmlInternalInit = NULL; nvmlInternalShutdown = NULL; nvmlInternalDeviceGetHandleByPciBusId = NULL; nvmlInternalDeviceGetIndex = NULL; nvmlInternalDeviceSetCpuAffinity = NULL; nvmlInternalDeviceClearCpuAffinity = NULL; if (cuhandle != NULL) dlclose(cuhandle); if (nvmlhandle != NULL) dlclose(nvmlhandle); return ncclSystemError; } ncclResult_t wrapNvmlInit(void) { if (nvmlInternalInit == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalInit(); if (ret != SUCCESS) { WARN("nvmlInit() failed: %s", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlShutdown(void) { if (nvmlInternalShutdown == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalShutdown(); if (ret != SUCCESS) { WARN("nvmlShutdown() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceGetHandleByPciBusId(const char* pciBusId, uint32_t* device) { if (nvmlInternalDeviceGetHandleByPciBusId == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceGetHandleByPciBusId(pciBusId, device); if (ret != SUCCESS) { WARN("nvmlDeviceGetHandleByPciBusId() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceGetIndex(uint32_t device, unsigned* index) { if (nvmlInternalDeviceGetIndex == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceGetIndex(device, index); if (ret != SUCCESS) { WARN("nvmlDeviceGetIndex() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceSetCpuAffinity(uint32_t device) { if (nvmlInternalDeviceSetCpuAffinity == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceSetCpuAffinity(device); if (ret != SUCCESS) { WARN("nvmlDeviceSetCpuAffinity() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceClearCpuAffinity(uint32_t device) { if (nvmlInternalInit == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceClearCpuAffinity(device); if (ret != SUCCESS) { WARN("nvmlDeviceClearCpuAffinity() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; }
1fa448092aae697ea7885c1d58a0b5f751ea7f76.cu
/************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include "libwrap.h" #include <dlfcn.h> #include "core.h" typedef enum { SUCCESS = 0 } RetCode; int symbolsLoaded = 0; static RetCode (*nvmlInternalInit)(void); static RetCode (*nvmlInternalShutdown)(void); static RetCode (*nvmlInternalDeviceGetHandleByPciBusId)(const char* pciBusId, nvmlDevice_t* device); static RetCode (*nvmlInternalDeviceGetIndex)(nvmlDevice_t device, unsigned* index); static RetCode (*nvmlInternalDeviceSetCpuAffinity)(nvmlDevice_t device); static RetCode (*nvmlInternalDeviceClearCpuAffinity)(nvmlDevice_t device); static const char* (*nvmlInternalErrorString)(RetCode r); ncclResult_t wrapSymbols(void) { if (symbolsLoaded) return ncclSuccess; static void* nvmlhandle = NULL; static void* cuhandle = NULL; void* tmp; void** cast; nvmlhandle=dlopen("libnvidia-ml.so", RTLD_NOW); if (!nvmlhandle) { nvmlhandle=dlopen("libnvidia-ml.so.1", RTLD_NOW); if (!nvmlhandle) { WARN("Failed to open libnvidia-ml.so[.1]"); goto teardown; } } cuhandle = dlopen("libcuda.so", RTLD_NOW); if (!cuhandle) { cuhandle = dlopen("libcuda.so.1", RTLD_NOW); if (!cuhandle) { WARN("Failed to open libcuda.so[.1]"); goto teardown; } } #define LOAD_SYM(handle, symbol, funcptr) do { \ cast = (void**)&funcptr; \ tmp = dlsym(handle, symbol); \ if (tmp == NULL) { \ WARN("dlsym failed on %s - %s", symbol, dlerror()); \ goto teardown; \ } \ *cast = tmp; \ } while (0) LOAD_SYM(nvmlhandle, "nvmlInit", nvmlInternalInit); LOAD_SYM(nvmlhandle, "nvmlShutdown", nvmlInternalShutdown); LOAD_SYM(nvmlhandle, "nvmlDeviceGetHandleByPciBusId", nvmlInternalDeviceGetHandleByPciBusId); LOAD_SYM(nvmlhandle, "nvmlDeviceGetIndex", nvmlInternalDeviceGetIndex); LOAD_SYM(nvmlhandle, "nvmlDeviceSetCpuAffinity", nvmlInternalDeviceSetCpuAffinity); LOAD_SYM(nvmlhandle, "nvmlDeviceClearCpuAffinity", nvmlInternalDeviceClearCpuAffinity); LOAD_SYM(nvmlhandle, "nvmlErrorString", nvmlInternalErrorString); 
symbolsLoaded = 1; return ncclSuccess; teardown: nvmlInternalInit = NULL; nvmlInternalShutdown = NULL; nvmlInternalDeviceGetHandleByPciBusId = NULL; nvmlInternalDeviceGetIndex = NULL; nvmlInternalDeviceSetCpuAffinity = NULL; nvmlInternalDeviceClearCpuAffinity = NULL; if (cuhandle != NULL) dlclose(cuhandle); if (nvmlhandle != NULL) dlclose(nvmlhandle); return ncclSystemError; } ncclResult_t wrapNvmlInit(void) { if (nvmlInternalInit == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalInit(); if (ret != SUCCESS) { WARN("nvmlInit() failed: %s", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlShutdown(void) { if (nvmlInternalShutdown == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalShutdown(); if (ret != SUCCESS) { WARN("nvmlShutdown() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceGetHandleByPciBusId(const char* pciBusId, nvmlDevice_t* device) { if (nvmlInternalDeviceGetHandleByPciBusId == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceGetHandleByPciBusId(pciBusId, device); if (ret != SUCCESS) { WARN("nvmlDeviceGetHandleByPciBusId() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceGetIndex(nvmlDevice_t device, unsigned* index) { if (nvmlInternalDeviceGetIndex == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceGetIndex(device, index); if (ret != SUCCESS) { WARN("nvmlDeviceGetIndex() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceSetCpuAffinity(nvmlDevice_t device) { if (nvmlInternalDeviceSetCpuAffinity == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceSetCpuAffinity(device); if (ret != SUCCESS) { WARN("nvmlDeviceSetCpuAffinity() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; } ncclResult_t wrapNvmlDeviceClearCpuAffinity(nvmlDevice_t device) { if (nvmlInternalInit == NULL) { WARN("lib wrapper not initilaized."); return ncclLibWrapperNotSet; } RetCode ret = nvmlInternalDeviceClearCpuAffinity(device); if (ret != SUCCESS) { WARN("nvmlDeviceClearCpuAffinity() failed: %s ", nvmlInternalErrorString(ret)); return ncclSystemError; } return ncclSuccess; }
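A short usage sketch for the wrappers above, plus the shape a new entry point would take. The pinToDevice helper, the example bus id, and the nvmlDeviceGetCount extension are illustrations only; nvmlDeviceGetCount is a real NVML symbol, but this wrapper does not currently load it.

// Pin the calling thread to the CPUs nearest the GPU at a given PCI bus id.
// (Helper name and bus id are placeholders.)
ncclResult_t pinToDevice(const char* pciBusId /* e.g. "0000:03:00.0" */) {
  ncclResult_t res;
  if ((res = wrapSymbols()) != ncclSuccess) return res;   // dlopen + dlsym, done once
  if ((res = wrapNvmlInit()) != ncclSuccess) return res;
  nvmlDevice_t dev;
  if ((res = wrapNvmlDeviceGetHandleByPciBusId(pciBusId, &dev)) != ncclSuccess) return res;
  if ((res = wrapNvmlDeviceSetCpuAffinity(dev)) != ncclSuccess) return res;
  return wrapNvmlShutdown();
}

// Adding another NVML call follows the same pattern: declare a function pointer,
// resolve it in wrapSymbols() with LOAD_SYM, clear it in the teardown block, and
// guard the wrapper with the same NULL check the functions above use.
//   static RetCode (*nvmlInternalDeviceGetCount)(unsigned int* deviceCount);
//   LOAD_SYM(nvmlhandle, "nvmlDeviceGetCount", nvmlInternalDeviceGetCount);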
67e6c80de841090d1245f0fe2f95c8d82544fc75.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "imageBlurKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *d_image = NULL; hipMalloc(&d_image, XSIZE*YSIZE); int h = YSIZE; int w = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( imageBlurKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,h,w); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( imageBlurKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,h,w); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( imageBlurKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,h,w); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
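The two while-loops above pad the image size up to a multiple of the block size before dividing. A ceiling division gives the same grid in one step; the helper name is ours:

// Smallest number of blocks that covers n elements with the given block size.
static inline int divUp(int n, int block) { return (n + block - 1) / block; }

// Equivalent to the iXSIZE/iYSIZE padding loops in the benchmark above:
//   dim3 gridBlock(divUp(XSIZE, BLOCKX), divUp(YSIZE, BLOCKY));
//   dim3 threadBlock(BLOCKX, BLOCKY);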
67e6c80de841090d1245f0fe2f95c8d82544fc75.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "imageBlurKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *d_image = NULL; cudaMalloc(&d_image, XSIZE*YSIZE); int h = YSIZE; int w = XSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); imageBlurKernel<<<gridBlock,threadBlock>>>(d_image,h,w); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { imageBlurKernel<<<gridBlock,threadBlock>>>(d_image,h,w); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { imageBlurKernel<<<gridBlock,threadBlock>>>(d_image,h,w); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
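The loop above measures wall-clock time around asynchronous kernel launches with std::chrono. A sketch of the same measurement using CUDA events, which bracket the work on the device itself; error checks are omitted and the helper name is ours:

// Returns the elapsed GPU time, in milliseconds, for 1000 back-to-back launches.
float timeBlurKernel(dim3 gridBlock, dim3 threadBlock, unsigned char* d_image, int h, int w) {
  cudaEvent_t startEvt, stopEvt;
  cudaEventCreate(&startEvt);
  cudaEventCreate(&stopEvt);

  cudaEventRecord(startEvt);
  for (int i = 0; i < 1000; i++) {
    imageBlurKernel<<<gridBlock, threadBlock>>>(d_image, h, w);
  }
  cudaEventRecord(stopEvt);
  cudaEventSynchronize(stopEvt);        // wait for the last launch to finish

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, startEvt, stopEvt);
  cudaEventDestroy(startEvt);
  cudaEventDestroy(stopEvt);
  return ms;
}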
accaac453509b318d9d77bd796f6bb4efb3f9dee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include "Graph.h" using namespace std; #define BlockSize 128 #define Bsize_maximum 128 // -------------------------------------------------------------------------- // double cpuSecond() { struct timeval tp; gettimeofday( &tp, NULL ); return( (double) tp.tv_sec + (double) tp.tv_usec * 1e-6 ); } // -------------------------------------------------------------------------- // __global__ void floyd_unid( int * M, const int nverts, const int k ) { int ij = threadIdx.x + blockDim.x * blockIdx.x; if( ij < nverts * nverts ) { int Mij = M[ij]; int i = ij / nverts; int j = ij - i * nverts; if( i != j && i != k && j != k ) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = ( Mij > Mikj ) ? Mikj : Mij; M[ij] = Mij; } } } // -------------------------------------------------------------------------- // __global__ void floyd_dosd( int * M, const int nverts, const int k ) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if( i < nverts && j < nverts ) { int ij = i * nverts + j; int Mij = M[ij]; if( i != j && i != k && j != k ) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = ( Mij > Mikj ) ? Mikj : Mij; M[ij] = Mij; } } } // -------------------------------------------------------------------------- // __global__ void reduceMax( int * V_in, int * V_out, const int nverts ) { extern __shared__ int sdata[]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = ( ( i < nverts ) ? V_in[i] : 0.0f ); if( sdata[tid] == 1000000 ) sdata[tid] = 0.0f; __syncthreads(); for( int s = blockDim.x / 2; s > 0; s >>= 1 ) { if( tid < s ) if( sdata[tid] < sdata[tid + s] ) sdata[tid] = sdata[tid + s]; __syncthreads(); } if( tid == 0 ) { V_out[blockIdx.x] = sdata[0]; } } // -------------------------------------------------------------------------- // int main( int argc, char *argv[] ) { if( argc != 2 ) { cerr << "Sintaxis: " << argv[0] << " <archivo de grafo>" << endl; return(-1); } // Obtenemos datos de la GPU int devID; hipDeviceProp_t props; hipError_t err; err = hipGetDevice( &devID ); if( err != hipSuccess ) cout << "Error al mostrar los datos de la GPU." << endl; hipGetDeviceProperties( &props, devID ); printf( "Device %d: \"%s\" with Compute %d. %d capability\n\n", devID, props.name, props.major, props.minor ); // Lectura del grafo pasado como parmetro Graph G; G.lee( argv[1] ); // Preparacin de las matrices const int nverts = G.vertices; const int niters = nverts; const int nverts2 = nverts * nverts; int *c_Out_M = new int[nverts2]; int size = nverts2 * sizeof(int); int *d_In_M = NULL; err = hipMalloc( (void **) &d_In_M, size ); if( err != hipSuccess ) cout << "Error en la reserva - hipMalloc." << endl; int *A = G.Get_Matrix(); // Fase 1: Ejecucin en GPU double t1 = cpuSecond(); err = hipMemcpy( d_In_M, A, size, hipMemcpyHostToDevice ); if( err != hipSuccess ) cout << "Error en la copia de la matriz a GPU - hipMemcpy." 
<< endl; //int threadsPerBlock = blocksize; //int blocksPerGrid = ( nverts2 + threadsPerBlock - 1 ) / threadsPerBlock; dim3 threadsPerBlock( 16, 16 ); dim3 numBlocks( ceil( (float) (nverts) / threadsPerBlock.x ), ceil( (float) (nverts) / threadsPerBlock.y ) ); // Ejecucin de las iteraciones - kernel for( int k = 0; k < niters; k++ ) { hipLaunchKernelGGL(( floyd_dosd) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, d_In_M, nverts, k ); err = hipGetLastError(); if( err != hipSuccess ) { fprintf( stderr, "Failed to launch kernel! ERROR = %d\n", err ); exit( EXIT_FAILURE ); } } hipMemcpy( c_Out_M, d_In_M, size, hipMemcpyDeviceToHost ); hipDeviceSynchronize(); double Tgpu = cpuSecond() - t1; cout << "Tiempo gastado en GPU = " << Tgpu << endl; // Fase 2: Ejecucin en CPU t1 = cpuSecond(); int inj, in, kn; for( int k = 0; k < niters; k++ ) { kn = k * nverts; for( int i = 0; i < nverts; i++ ) { in = i * nverts; for( int j = 0; j < nverts; j++ ) if( i != j && i != k && j != k ) { inj = in + j; A[inj] = min( A[in + k] + A[kn + j], A[inj] ); } } } double t2 = cpuSecond() - t1; cout << "Tiempo gastado en CPU = " << t2 << endl << endl; cout << "Ganancia = " << t2 / Tgpu << endl; for( int i = 0; i < nverts; i++ ) for( int j = 0; j < nverts; j++ ) if( abs( c_Out_M[i * nverts + j] - G.arista( i, j ) ) > 0 ) cout << "Error (" << i << "," << j << ") -> " << c_Out_M[i * nverts + j] << "..." << G.arista( i, j ) << endl; /* cout << "Matriz resultado secuencial:" << endl; for( int i = 0; i < nverts; i++ ) { cout << i << ": "; for( int j = 0; j < nverts; j++ ) cout << A[i * nverts + j] << " "; cout << endl; } */ /* cout << endl << endl << "Matriz resultado GPU:" << endl; for( int i = 0; i < nverts; i++ ) { cout << i << ": "; for( int j = 0; j < nverts; j++ ) cout << c_Out_M[i * nverts + j] << " "; cout << endl; } */ //dim3 dimBlock( Bsize_maximum ); //dim3 dimGrid ( ceil((float(N)/(float)dimBlock.x)) ); dim3 threadsPerBlock2( Bsize_maximum, 1 ); dim3 numBlocks2 ( ceil( (float) nverts2 / threadsPerBlock2.x ), 1 ); int smemSize = nverts2 * sizeof(int); int *C = new int[numBlocks2.x]; int *C_D; hipMalloc( (void**) &C_D, numBlocks2.x * sizeof(int) ); hipLaunchKernelGGL(( reduceMax) , dim3(numBlocks2), dim3(threadsPerBlock2), smemSize, 0, d_In_M, C_D, nverts2 ); err = hipGetLastError(); if( err != hipSuccess ) { fprintf( stderr, "Failed to launch reduction kernel! ERROR = %d\n", err ); exit( EXIT_FAILURE ); } hipMemcpy( C, C_D, size, hipMemcpyDeviceToHost ); int max = 0.0f; for( int i = 0; i < numBlocks2.x; i++ ) if( max < C[i] ) max = C[i]; /* for( int i = 0; i < nverts2; i++ ){ cout << "C[" << i << "]=" << C[i] << endl; } */ cout << endl << "Camino mximo GPU = " << max << endl; int maxcpu = 0; for( int i = 0; i < nverts2; i++ ) if( A[i] > maxcpu && A[i] != 1000000 ) maxcpu = A[i]; cout << "Camino mximo CPU = " << maxcpu << endl; }
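floyd_unid above is compiled but never launched; main drives the 2D floyd_dosd variant. A minimal sketch of the 1D driver it pairs with, using the BlockSize constant from the top of the file; the wrapper function itself is ours:

// Runs the 1D Floyd-Warshall kernel: one thread per matrix element, one launch per k.
void floyd1D_gpu(int* d_M, int nverts) {
  int nverts2 = nverts * nverts;
  int threads = BlockSize;                                  // 128
  int blocks  = (nverts2 + threads - 1) / threads;
  for (int k = 0; k < nverts; k++) {
    hipLaunchKernelGGL(floyd_unid, dim3(blocks), dim3(threads), 0, 0,
                       d_M, nverts, k);
  }
  hipDeviceSynchronize();
}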
accaac453509b318d9d77bd796f6bb4efb3f9dee.cu
#include <iostream> #include <fstream> #include <string.h> #include <sys/time.h> #include "Graph.h" using namespace std; #define BlockSize 128 #define Bsize_maximum 128 // -------------------------------------------------------------------------- // double cpuSecond() { struct timeval tp; gettimeofday( &tp, NULL ); return( (double) tp.tv_sec + (double) tp.tv_usec * 1e-6 ); } // -------------------------------------------------------------------------- // __global__ void floyd_unid( int * M, const int nverts, const int k ) { int ij = threadIdx.x + blockDim.x * blockIdx.x; if( ij < nverts * nverts ) { int Mij = M[ij]; int i = ij / nverts; int j = ij - i * nverts; if( i != j && i != k && j != k ) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = ( Mij > Mikj ) ? Mikj : Mij; M[ij] = Mij; } } } // -------------------------------------------------------------------------- // __global__ void floyd_dosd( int * M, const int nverts, const int k ) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if( i < nverts && j < nverts ) { int ij = i * nverts + j; int Mij = M[ij]; if( i != j && i != k && j != k ) { int Mikj = M[i * nverts + k] + M[k * nverts + j]; Mij = ( Mij > Mikj ) ? Mikj : Mij; M[ij] = Mij; } } } // -------------------------------------------------------------------------- // __global__ void reduceMax( int * V_in, int * V_out, const int nverts ) { extern __shared__ int sdata[]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = ( ( i < nverts ) ? V_in[i] : 0.0f ); if( sdata[tid] == 1000000 ) sdata[tid] = 0.0f; __syncthreads(); for( int s = blockDim.x / 2; s > 0; s >>= 1 ) { if( tid < s ) if( sdata[tid] < sdata[tid + s] ) sdata[tid] = sdata[tid + s]; __syncthreads(); } if( tid == 0 ) { V_out[blockIdx.x] = sdata[0]; } } // -------------------------------------------------------------------------- // int main( int argc, char *argv[] ) { if( argc != 2 ) { cerr << "Sintaxis: " << argv[0] << " <archivo de grafo>" << endl; return(-1); } // Obtenemos datos de la GPU int devID; cudaDeviceProp props; cudaError_t err; err = cudaGetDevice( &devID ); if( err != cudaSuccess ) cout << "Error al mostrar los datos de la GPU." << endl; cudaGetDeviceProperties( &props, devID ); printf( "Device %d: \"%s\" with Compute %d. %d capability\n\n", devID, props.name, props.major, props.minor ); // Lectura del grafo pasado como parámetro Graph G; G.lee( argv[1] ); // Preparación de las matrices const int nverts = G.vertices; const int niters = nverts; const int nverts2 = nverts * nverts; int *c_Out_M = new int[nverts2]; int size = nverts2 * sizeof(int); int *d_In_M = NULL; err = cudaMalloc( (void **) &d_In_M, size ); if( err != cudaSuccess ) cout << "Error en la reserva - cudaMalloc." << endl; int *A = G.Get_Matrix(); // Fase 1: Ejecución en GPU double t1 = cpuSecond(); err = cudaMemcpy( d_In_M, A, size, cudaMemcpyHostToDevice ); if( err != cudaSuccess ) cout << "Error en la copia de la matriz a GPU - cudaMemcpy." << endl; //int threadsPerBlock = blocksize; //int blocksPerGrid = ( nverts2 + threadsPerBlock - 1 ) / threadsPerBlock; dim3 threadsPerBlock( 16, 16 ); dim3 numBlocks( ceil( (float) (nverts) / threadsPerBlock.x ), ceil( (float) (nverts) / threadsPerBlock.y ) ); // Ejecución de las iteraciones - kernel for( int k = 0; k < niters; k++ ) { floyd_dosd <<< numBlocks, threadsPerBlock >>> ( d_In_M, nverts, k ); err = cudaGetLastError(); if( err != cudaSuccess ) { fprintf( stderr, "Failed to launch kernel! 
ERROR = %d\n", err ); exit( EXIT_FAILURE ); } } cudaMemcpy( c_Out_M, d_In_M, size, cudaMemcpyDeviceToHost ); cudaDeviceSynchronize(); double Tgpu = cpuSecond() - t1; cout << "Tiempo gastado en GPU = " << Tgpu << endl; // Fase 2: Ejecución en CPU t1 = cpuSecond(); int inj, in, kn; for( int k = 0; k < niters; k++ ) { kn = k * nverts; for( int i = 0; i < nverts; i++ ) { in = i * nverts; for( int j = 0; j < nverts; j++ ) if( i != j && i != k && j != k ) { inj = in + j; A[inj] = min( A[in + k] + A[kn + j], A[inj] ); } } } double t2 = cpuSecond() - t1; cout << "Tiempo gastado en CPU = " << t2 << endl << endl; cout << "Ganancia = " << t2 / Tgpu << endl; for( int i = 0; i < nverts; i++ ) for( int j = 0; j < nverts; j++ ) if( abs( c_Out_M[i * nverts + j] - G.arista( i, j ) ) > 0 ) cout << "Error (" << i << "," << j << ") -> " << c_Out_M[i * nverts + j] << "..." << G.arista( i, j ) << endl; /* cout << "Matriz resultado secuencial:" << endl; for( int i = 0; i < nverts; i++ ) { cout << i << ": "; for( int j = 0; j < nverts; j++ ) cout << A[i * nverts + j] << " "; cout << endl; } */ /* cout << endl << endl << "Matriz resultado GPU:" << endl; for( int i = 0; i < nverts; i++ ) { cout << i << ": "; for( int j = 0; j < nverts; j++ ) cout << c_Out_M[i * nverts + j] << " "; cout << endl; } */ //dim3 dimBlock( Bsize_maximum ); //dim3 dimGrid ( ceil((float(N)/(float)dimBlock.x)) ); dim3 threadsPerBlock2( Bsize_maximum, 1 ); dim3 numBlocks2 ( ceil( (float) nverts2 / threadsPerBlock2.x ), 1 ); int smemSize = nverts2 * sizeof(int); int *C = new int[numBlocks2.x]; int *C_D; cudaMalloc( (void**) &C_D, numBlocks2.x * sizeof(int) ); reduceMax <<<numBlocks2, threadsPerBlock2, smemSize>>> ( d_In_M, C_D, nverts2 ); err = cudaGetLastError(); if( err != cudaSuccess ) { fprintf( stderr, "Failed to launch reduction kernel! ERROR = %d\n", err ); exit( EXIT_FAILURE ); } cudaMemcpy( C, C_D, size, cudaMemcpyDeviceToHost ); int max = 0.0f; for( int i = 0; i < numBlocks2.x; i++ ) if( max < C[i] ) max = C[i]; /* for( int i = 0; i < nverts2; i++ ){ cout << "C[" << i << "]=" << C[i] << endl; } */ cout << endl << "Camino máximo GPU = " << max << endl; int maxcpu = 0; for( int i = 0; i < nverts2; i++ ) if( A[i] > maxcpu && A[i] != 1000000 ) maxcpu = A[i]; cout << "Camino máximo CPU = " << maxcpu << endl; }
ab6c441ba1361340c4c9299c29f19ab48c115d5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <torch/library.h> #include <THH/THHAtomics.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { template <typename T> __device__ T bilinear_interpolate( const T* input, int height, int width, T y, T x, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void ps_roi_align_forward_kernel_impl( int nthreads, const T* input, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, const T* rois, int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( int height, int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; } template <typename T> __global__ void ps_roi_align_backward_kernel_impl( int nthreads, const T* grad_output, const int* channel_mapping, int num_rois, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(grad_input_offset + y_low * width + x_low, g1); atomicAdd(grad_input_offset + y_low * width + x_high, g2); atomicAdd(grad_input_offset + y_high * width + x_low, g3); atomicAdd(grad_input_offset + y_high * width + x_high, g4); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( const at::Tensor& input, const at::Tensor& rois, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ps_roi_align_forward_kernel"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, channel_mapping); } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "ps_roi_align_forward_kernel", [&] { hipLaunchKernelGGL(( ps_roi_align_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(hipGetLastError()); hipDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor ps_roi_align_backward_kernel( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio, int64_t batch_size, int64_t channels, int64_t height, int64_t width) { // Check if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), 
"grad must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "ps_roi_align_backward_kernel"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { hipLaunchKernelGGL(( ps_roi_align_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), TORCH_FN(ps_roi_align_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), TORCH_FN(ps_roi_align_backward_kernel)); } } // namespace ops } // namespace vision
ab6c441ba1361340c4c9299c29f19ab48c115d5c.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <torch/library.h> #include <THC/THCAtomics.cuh> #include "cuda_helpers.h" namespace vision { namespace ops { namespace { template <typename T> __device__ T bilinear_interpolate( const T* input, int height, int width, T y, T x, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = input[y_low * width + x_low]; T v2 = input[y_low * width + x_high]; T v3 = input[y_high * width + x_low]; T v4 = input[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void ps_roi_align_forward_kernel_impl( int nthreads, const T* input, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, const T* rois, int channels_out, T* output, int* channel_mapping) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c_out, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c_out = (index / pooled_width / pooled_height) % channels_out; int n = index / pooled_width / pooled_height / channels_out; // (n, c_in, ph, pw) is the associated element in the input int c_in = (c_out * pooled_height + ph) * pooled_width + pw; // [start, end) interval for spatial sampling const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; const T* offset_input = input + (roi_batch_ind * channels + c_in) * height * width; T out_sum = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_input, height, width, y, x, index); out_sum += val; } } out_sum /= count; output[index] = out_sum; channel_mapping[index] = c_in; } } template <typename T> __device__ void bilinear_interpolate_gradient( int height, int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = input[y_low * width + x_low]; // T v2 = input[y_low * width + x_high]; // T v3 = input[y_high * width + x_low]; // T v4 = input[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; } template <typename T> __global__ void ps_roi_align_backward_kernel_impl( int nthreads, const T* grad_output, const int* channel_mapping, int num_rois, const T spatial_scale, int channels, int height, int width, int pooled_height, int pooled_width, int sampling_ratio, int channels_out, T* grad_input, const T* rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, *, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / channels_out; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); // Force too small ROIs to be 1x1 T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int c_in = channel_mapping[index]; T* grad_input_offset = grad_input + (roi_batch_ind * channels + c_in) * height * width; // Do not using floor/ceil; this implementation detail is critical T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; const T grad_output_this_bin = grad_output[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = hstart + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = wstart + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = grad_output_this_bin * w1 / count; T g2 = grad_output_this_bin * w2 / count; T g3 = grad_output_this_bin * w3 / count; T g4 = grad_output_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(grad_input_offset + y_low * width + x_low, g1); atomicAdd(grad_input_offset + y_low * width + x_high, g2); atomicAdd(grad_input_offset + y_high * width + x_low, g3); atomicAdd(grad_input_offset + y_high * width + x_high, g4); } // if } // ix } // iy } } std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( const at::Tensor& input, const at::Tensor& rois, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio) { // Check if input tensors are CUDA tensors TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ps_roi_align_forward_kernel"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); TORCH_CHECK( channels % (pooled_height * pooled_width) == 0, "input channels must be a multiple of pooling height * pooling width"); int channels_out = channels / (pooled_height * pooled_width); auto output = at::zeros( {num_rois, channels_out, pooled_height, pooled_width}, input.options()); auto channel_mapping = at::zeros(output.sizes(), input.options().dtype(at::kInt)); auto output_size = output.numel(); if (output_size == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, channel_mapping); } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); auto input_ = input.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "ps_roi_align_forward_kernel", [&] { ps_roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( output_size, input_.data_ptr<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois_.data_ptr<scalar_t>(), channels_out, output.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); cudaDeviceSynchronize(); return std::make_tuple(output, channel_mapping); } at::Tensor ps_roi_align_backward_kernel( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& channel_mapping, double spatial_scale, int64_t pooled_height, int64_t pooled_width, int64_t sampling_ratio, int64_t batch_size, int64_t channels, int64_t height, int64_t width) { // Check if input tensors are CUDA tensors TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); 
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK( channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, channel_mapping_t{channel_mapping, "channel_mapping", 3}; at::CheckedFrom c = "ps_roi_align_backward_kernel"; at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min( ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096))); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int channels_out = channels / (pooled_height * pooled_width); auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { ps_roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad_.data_ptr<scalar_t>(), channel_mapping.data_ptr<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, channels_out, grad_input.data_ptr<scalar_t>(), rois_.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } } // namespace TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { m.impl( TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), TORCH_FN(ps_roi_align_forward_kernel)); m.impl( TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), TORCH_FN(ps_roi_align_backward_kernel)); } } // namespace ops } // namespace vision
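Both the forward kernel and the channel_mapping tensor hinge on the same index arithmetic: a linear output index splits into (n, c_out, ph, pw), and the position-sensitive input channel is c_in = (c_out * pooled_height + ph) * pooled_width + pw, which is also why the host wrapper requires channels to be divisible by pooled_height * pooled_width. A small host-only sketch (the sizes below are invented for illustration) walks that decomposition and confirms every pooled cell lands on a valid input channel:

#include <stdio.h>

int main(void) {
  /* Invented example sizes: 3x3 pooling, 4 output channels, 2 ROIs. */
  const int pooled_height = 3, pooled_width = 3, channels_out = 4;
  const int channels = channels_out * pooled_height * pooled_width;  /* 36 */
  const int num_rois = 2;
  const int output_size = num_rois * channels_out * pooled_height * pooled_width;

  for (int index = 0; index < output_size; index++) {
    /* Same decomposition as the forward kernel. */
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c_out = (index / pooled_width / pooled_height) % channels_out;
    int n = index / pooled_width / pooled_height / channels_out;

    /* Position-sensitive channel that feeds this pooled cell. */
    int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
    if (n >= num_rois || c_in < 0 || c_in >= channels) {
      printf("index %d decomposed out of range\n", index);
      return 1;
    }
  }
  printf("all %d output cells map to input channels in [0, %d)\n",
         output_size, channels);
  return 0;
}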
bad60c7eaf1cc49c18e4b01fc2d815ed8ea58850.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>

#define N 1000000

#define checkCUDA(expression)                  \
{                                              \
  hipError_t status = (expression);            \
  if (status != hipSuccess) {                  \
    printf("Error on line %d: err code %d\n",  \
           __LINE__, status);                  \
    exit(EXIT_FAILURE);                        \
  }                                            \
}

#define checkCUBLAS(expression)                \
{                                              \
  hipblasStatus_t status = (expression);       \
  if (status != HIPBLAS_STATUS_SUCCESS) {      \
    printf("Error on line %d: err code %d\n",  \
           __LINE__, status);                  \
    exit(EXIT_FAILURE);                        \
  }                                            \
}

#define getMillisecond(start, end)   \
  (end.tv_sec-start.tv_sec)*1000 +   \
  (end.tv_usec-start.tv_usec)/1000.0

int main (void){
  hipblasHandle_t handle;
  float *x, *y, *resultCPU, *resultGPU;
  float *devPtrX, *devPtrY;
  float alpha = 1.2;
  float incx = 1, incy = 1;
  float ms = 0;
  struct timeval start, end;

  srand(2018);

  // Memory for host
  x = (float *)malloc (N * sizeof (float));
  y = (float *)malloc (N * sizeof (float));
  resultCPU = (float *)malloc (N * sizeof (float));
  resultGPU = (float *)malloc (N * sizeof (float));

  // Init values
  for (int i = 0; i < N; i++) {
    x[i] = (rand() % 1000000) / 10000.0;
    y[i] = (rand() % 1000000) / 10000.0;
  }

  // Memory for device
  checkCUDA (hipMalloc ((void**)&devPtrX, N * sizeof (float)));
  checkCUDA (hipMalloc ((void**)&devPtrY, N * sizeof (float)));

  // Init cuBLAS
  checkCUBLAS (hipblasCreate (&handle));

  // Memcpy host to device
  checkCUBLAS (hipblasSetVector (N, sizeof (float), x, 1, devPtrX, 1));
  checkCUBLAS (hipblasSetVector (N, sizeof (float), y, 1, devPtrY, 1));

  // Saxpy with GPU
  gettimeofday(&start, NULL);
  checkCUBLAS (hipblasSaxpy (handle, N, &alpha, devPtrX, incx, devPtrY, incy));
  gettimeofday(&end, NULL);

  // Print duration
  ms = getMillisecond(start, end);
  printf("GPU time: %f (ms)\n", ms);

  // Memcpy device to host
  checkCUBLAS (hipblasGetVector (N, sizeof (float), devPtrY, 1, resultGPU, 1));

  // Saxpy with CPU
  gettimeofday(&start, NULL);
  for (int i = 0; i < N; i++) {
    resultCPU[i] = x[i]*alpha + y[i];
  }
  gettimeofday(&end, NULL);

  // Print duration
  ms = getMillisecond(start, end);
  printf("CPU time: %f (ms)\n", ms);

  // Validate the result
  float error = 0;
  for (int i = 0; i < N; i++) {
    error += abs((resultCPU[i] - resultGPU[i]) / resultCPU[i]);
  }
  error = error / N * 100;
  printf ("Mean Absolute Percentage Error: %f (%%)\n", error);

  // Free
  checkCUDA (hipFree (devPtrX));
  checkCUDA (hipFree (devPtrY));
  checkCUBLAS (hipblasDestroy (handle));
  free(x);
  free(y);
  free(resultCPU);
  free(resultGPU);

  return EXIT_SUCCESS;
}
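One detail worth keeping in mind when reading the timings printed by this example: hipblasSaxpy, like its cuBLAS counterpart, enqueues work on a stream and returns to the host immediately, so the gettimeofday pair around it mostly measures launch and queueing overhead rather than execution. A hedged sketch of an alternative timing block follows; it reuses the handle, buffers and macros defined in the program above, and the only addition is a hipDeviceSynchronize before the second timestamp:

// Sketch only: drop-in replacement for the "Saxpy with GPU" timing block
// above. handle, devPtrX/devPtrY, alpha, incx/incy, start/end, ms and the
// check macros all come from this file; only the synchronization is new.
gettimeofday(&start, NULL);
checkCUBLAS (hipblasSaxpy (handle, N, &alpha, devPtrX, incx, devPtrY, incy));
checkCUDA (hipDeviceSynchronize());   // wait for the enqueued work to finish
gettimeofday(&end, NULL);
ms = getMillisecond(start, end);
printf("GPU time incl. execution: %f (ms)\n", ms);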
bad60c7eaf1cc49c18e4b01fc2d815ed8ea58850.cu
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>

#define N 1000000

#define checkCUDA(expression)                  \
{                                              \
  cudaError_t status = (expression);           \
  if (status != cudaSuccess) {                 \
    printf("Error on line %d: err code %d\n",  \
           __LINE__, status);                  \
    exit(EXIT_FAILURE);                        \
  }                                            \
}

#define checkCUBLAS(expression)                \
{                                              \
  cublasStatus_t status = (expression);        \
  if (status != CUBLAS_STATUS_SUCCESS) {       \
    printf("Error on line %d: err code %d\n",  \
           __LINE__, status);                  \
    exit(EXIT_FAILURE);                        \
  }                                            \
}

#define getMillisecond(start, end)   \
  (end.tv_sec-start.tv_sec)*1000 +   \
  (end.tv_usec-start.tv_usec)/1000.0

int main (void){
  cublasHandle_t handle;
  float *x, *y, *resultCPU, *resultGPU;
  float *devPtrX, *devPtrY;
  float alpha = 1.2;
  float incx = 1, incy = 1;
  float ms = 0;
  struct timeval start, end;

  srand(2018);

  // Memory for host
  x = (float *)malloc (N * sizeof (float));
  y = (float *)malloc (N * sizeof (float));
  resultCPU = (float *)malloc (N * sizeof (float));
  resultGPU = (float *)malloc (N * sizeof (float));

  // Init values
  for (int i = 0; i < N; i++) {
    x[i] = (rand() % 1000000) / 10000.0;
    y[i] = (rand() % 1000000) / 10000.0;
  }

  // Memory for device
  checkCUDA (cudaMalloc ((void**)&devPtrX, N * sizeof (float)));
  checkCUDA (cudaMalloc ((void**)&devPtrY, N * sizeof (float)));

  // Init cuBLAS
  checkCUBLAS (cublasCreate (&handle));

  // Memcpy host to device
  checkCUBLAS (cublasSetVector (N, sizeof (float), x, 1, devPtrX, 1));
  checkCUBLAS (cublasSetVector (N, sizeof (float), y, 1, devPtrY, 1));

  // Saxpy with GPU
  gettimeofday(&start, NULL);
  checkCUBLAS (cublasSaxpy (handle, N, &alpha, devPtrX, incx, devPtrY, incy));
  gettimeofday(&end, NULL);

  // Print duration
  ms = getMillisecond(start, end);
  printf("GPU time: %f (ms)\n", ms);

  // Memcpy device to host
  checkCUBLAS (cublasGetVector (N, sizeof (float), devPtrY, 1, resultGPU, 1));

  // Saxpy with CPU
  gettimeofday(&start, NULL);
  for (int i = 0; i < N; i++) {
    resultCPU[i] = x[i]*alpha + y[i];
  }
  gettimeofday(&end, NULL);

  // Print duration
  ms = getMillisecond(start, end);
  printf("CPU time: %f (ms)\n", ms);

  // Validate the result
  float error = 0;
  for (int i = 0; i < N; i++) {
    error += abs((resultCPU[i] - resultGPU[i]) / resultCPU[i]);
  }
  error = error / N * 100;
  printf ("Mean Absolute Percentage Error: %f (%%)\n", error);

  // Free
  checkCUDA (cudaFree (devPtrX));
  checkCUDA (cudaFree (devPtrY));
  checkCUBLAS (cublasDestroy (handle));
  free(x);
  free(y);
  free(resultCPU);
  free(resultGPU);

  return EXIT_SUCCESS;
}
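For the CUDA variant, another option is to time the call with CUDA events, which bracket work on the stream and therefore include kernel execution without depending on host clocks. This is a sketch meant as a drop-in for the "Saxpy with GPU" block above; evStart, evStop and gpuMs are new names, everything else comes from the program:

// Sketch only: event-based timing for the cublasSaxpy call above.
// evStart, evStop and gpuMs are new; handle, buffers and macros are from this file.
cudaEvent_t evStart, evStop;
float gpuMs = 0.0f;
checkCUDA (cudaEventCreate(&evStart));
checkCUDA (cudaEventCreate(&evStop));
checkCUDA (cudaEventRecord(evStart, 0));
checkCUBLAS (cublasSaxpy (handle, N, &alpha, devPtrX, incx, devPtrY, incy));
checkCUDA (cudaEventRecord(evStop, 0));
checkCUDA (cudaEventSynchronize(evStop));
checkCUDA (cudaEventElapsedTime(&gpuMs, evStart, evStop));
printf("GPU time (events): %f (ms)\n", gpuMs);
checkCUDA (cudaEventDestroy(evStart));
checkCUDA (cudaEventDestroy(evStop));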
f065103be316f3c5fa276ba5b112b0a8c2da085e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <ctype.h> #include "cu.h" #include "base.h" #include "prna.h" #include "util.h" #include "param.h" /* penalty for a helix terminated by a pair containing a U */ DEV static real_t terminal_U_penalty(const base_t *s, const int i, const int j, param_t p) { return s[i] == U || s[j] == U ? p->terminal_AU_penalty : RCONST(0.); } DEV static real_t dangle_3p_energy(const base_t *s, const int i, const int j, const int ip1, param_t p) { return p->dangle_3p[s[i]][s[j]][s[ip1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t dangle_5p_energy(const base_t *s, const int i, const int j, const int jm1, param_t p) { return p->dangle_5p[s[i]][s[j]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t terminal_stack(const base_t *s, const int i, const int j, const int ip1, const int jm1, param_t p) { return p->tstack[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t terminal_stack_multibranch(const base_t *s, const int i, const int j, const int ip1, const int jm1, param_t p) { return p->tstackm[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static const real_t *lookup_find(const base_t *s, const int d, param_t p) { int i; switch (d) { case 3: for (i = 0; i < p->ntriloop; i++) if (sequences_match(s, p->triloop[i].seq, d+2)) return &p->triloop[i].val; break; case 4: for (i = 0; i < p->ntloop; i++) if (sequences_match(s, p->tloop[i].seq, d+2)) return &p->tloop[i].val; break; case 6: for (i = 0; i < p->nhexaloop; i++) if (sequences_match(s, p->hexaloop[i].seq, d+2)) return &p->hexaloop[i].val; break; } return 0; } /*** * Energy of a hairpin loop with d unpaired bases, d = j-i-1 * s[i] is paired with s[j] * s[i+1] is mismatched with s[j-1] ***/ DEV static real_t hairpin_loop_energy(const base_t *s, const int i, const int j, const int d, param_t p) { /* Lookup tables for special hairpin loops */ const real_t *val; if ((val = lookup_find(&s[i],d,p))) return *val; /* Hairpin loop initiation penalty */ real_t e; if (d > LOOP_MAX) e = p->hairpin_loop_initiation[LOOP_MAX] + p->Extrapolation_for_large_loops * LOG((real_t) d / LOOP_MAX); else e = p->hairpin_loop_initiation[d]; if (d == 3) { if (contains_only_base(C,d,&s[i+1])) e += p->c_hairpin_of_3; e += terminal_U_penalty(s,i,j,p); } else { e += p->tstackh[s[i]][s[j]][s[i+1]][s[j-1]]; if (contains_only_base(C,d,&s[i+1])) e += p->c_hairpin_slope*d + p->c_hairpin_intercept; } if (s[i] == G && s[j] == U && i > 1 && s[i-1] == G && s[i-2] == G) e += p->bonus_for_GGG_hairpin; return e; } DEV static real_t real_min(real_t a, real_t b) { return a < b ? 
a : b; } /*** * Energy of an internal/bulge loop with d1, d2 unpaired bases, * d1 = ip-i-1, d2 = j-jp-1 * s[i] is paired with s[j] * s[i+1] is mismatched sith s[j-1] * s[ip-1] is mismatched with s[jp+1] * s[ip] is paired with s[jp] ***/ DEV static real_t internal_loop_energy(const base_t *s, const int i, const int j, const int ip, const int jp, const int d1, const int d2, param_t p) { /* Bulge loops */ if (d1 == 0 || d2 == 0) { real_t e = p->bulge_loop_initiation[d1+d2]; if (d1 == 1 || d2 == 1) { /* single-nucleotide bulge */ e += p->stack[s[i]][s[j]][s[ip]][s[jp]]; if ((d1 == 1 && s[i+1] == C && (s[i] == C || s[i+2] == C)) || (d2 == 1 && s[j-1] == C && (s[j] == C || s[j-2] == C))) e += p->Bonus_for_Single_C_bulges_adjacent_to_C; } else { e += terminal_U_penalty(s,i,j,p); e += terminal_U_penalty(s,ip,jp,p); } return e; } /* Small internal loops */ if (d1 == 1 && d2 == 1) return p->int11[s[i]][s[i+1]][s[i+2]][s[j-2]][s[j-1]][s[j]]; if (d1 == 2 && d2 == 2) return p->int22[s[i]][s[ip]][s[j]][s[jp]][s[i+1]][s[i+2]][s[j-1]][s[j-2]]; if (d1 == 1 && d2 == 2) return p->int21[s[i]][s[j]][s[i+1]][s[j-1]][s[jp+1]][s[ip]][s[jp]]; if (d1 == 2 && d2 == 1) return p->int21[s[jp]][s[ip]][s[jp+1]][s[ip-1]][s[i+1]][s[j]][s[i]]; /* Larger internal loops */ tab4_t *sp; if (d1 == 1 || d2 == 1) sp = &p->tstacki1n; else if ((d1 == 2 && d2 == 3) || (d1 == 3 && d2 == 2)) sp = &p->tstacki23; else sp = &p->tstacki; return p->internal_loop_initiation[d1+d2] + real_min(p->fm_array_first_element * abs(d1-d2), p->maximum_correction) + (*sp)[s[i]][s[j]][s[i+1]][s[j-1]] + (*sp)[s[jp]][s[ip]][s[jp+1]][s[ip-1]]; } /* return -ln(e^-a + e^-b) */ DEV static real_t free_energy_sum(const real_t a, const real_t b) { if (a < b) return a - LOG1P(EXP(a-b)); else if (b < a) return b - LOG1P(EXP(b-a)); else return a - LOG(2); } DEV static void free_energy_accumulate(real_t *a, const real_t b) { *a = free_energy_sum(*a,b); } DEV HOST static int int_min(int a, int b) { return a < b ? a : b; } DEV HOST static int ind(int i, int j, int n) { return i*n + j; } DEV HOST static int upper_triangle_index(int i, int j) { return (j*(j-1))/2 + i; } DEV HOST inline static int can_pair(int i, int j, int n, const int *bcp) { if (i>=0 && j<=n-1 && i != j && j>=0 && i<=n-1){ if (i < j) return bcp[upper_triangle_index(i, j)]; else return bcp[upper_triangle_index(j, i)]; } else return 0; } DEV static int wrap(int i, int n) { return i >= n ? i-n : i; } DEV static int is_exterior(int i, int j) { return j < i; } DEV static int is_interior(int i, int j) { return i < j; } DEV HOST real_t* array_val(real_t *__restrict a, int i, int j, int n, const int *__restrict bcp) { return can_pair(i,j,n,bcp) ? 
&a[ind(i,j,n)] : 0; } #ifdef __HIPCC__ #define ISTART blockIdx.x #define IINC gridDim.x #else #define ISTART 0 #define IINC 1 #endif GLOBAL static void calc_hairpin_stack_exterior_multibranch (const int d, const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const real_t *__restrict x, const real_t *__restrict w5, const real_t *__restrict w3, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp)) continue; real_t vij = INF; if (i != n-1 && j != 0) { /* hairpin loop */ if (is_interior(i,j)) vij = hairpin_loop_energy(s,i,j,d,p); /* stack */ if (can_pair(i+1,j-1,n,bcp) && !(is_interior(i,j) && d <= LOOP_MIN-2)) free_energy_accumulate(&vij, p->stack[s[i]][s[j]][s[i+1]][s[j-1]] + v[ind(i+1,j-1,n)]); } /* exterior loop */ if (is_exterior(i,j)) { free_energy_accumulate(&vij, w3[i+1] + w5[j-1] + terminal_U_penalty(s,i,j,p)); if (i != n-1) free_energy_accumulate(&vij, w3[i+2] + w5[j-1] + dangle_3p_energy(s,i,j,i+1,p)); if (j != 0) free_energy_accumulate(&vij, w3[i+1] + w5[j-2] + dangle_5p_energy(s,i,j,j-1,p)); if (i != n-1 && j != 0) free_energy_accumulate(&vij, w3[i+2] + w5[j-2] + terminal_stack(s,i,j,i+1,j-1,p)); } /* multibranch loop */ if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) { free_energy_accumulate(&vij, x[ind((d-2)%5,i+1,n)] + terminal_U_penalty(s,i,j,p) + p->a + p->c); if (i != n-2) free_energy_accumulate(&vij, x[ind((d-3)%5,i+2,n)] + dangle_3p_energy(s,i,j,i+1,p) + p->a + p->b + p->c); if (j != 1) free_energy_accumulate(&vij, x[ind((d-3)%5,i+1,n)] + dangle_5p_energy(s,i,j,j-1,p) + p->a + p->b + p->c); if (i != n-2 && j != 1) free_energy_accumulate(&vij, x[ind((d-4)%5,i+2,n)] + terminal_stack_multibranch(s,i,j,i+1,j-1,p) + p->a + 2*p->b + p->c); } v[ind(i,j,n)] = vij; } } #ifdef __HIPCC__ #define NTHREAD 128 #define THREAD_X 8 #define THREAD_Y 16 #if THREAD_X*THREAD_Y != NTHREAD #error THREAD_X * THREAD_Y must be equal to NTHREAD #endif DEV static void free_energy_reduce(real_t *x, int tid, int nt) { __shared__ real_t buf[NTHREAD]; buf[tid] = *x; for (nt /= 2, __syncthreads(); nt > 0; nt /= 2, __syncthreads()) if (tid < nt) free_energy_accumulate(&buf[tid], buf[tid+nt]); if (tid == 0) *x = buf[0]; } #endif /* __HIPCC__ */ GLOBAL static void calc_internal (const int d, const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || (is_interior(i,j) && d <= LOOP_MIN+2) || !can_pair(i,j,n,bcp)) continue; real_t vij = INF; #ifdef __HIPCC__ const int d1start = threadIdx.x; const int d1inc = blockDim.x; #else const int d1start = 0; const int d1inc = 1; #endif const int dmax = int_min(LOOP_MAX, d-2); const int d1max = int_min(dmax, n-i-2); int d1; for (d1 = d1start; d1 <= d1max; d1 += d1inc) { const int ip = i+d1+1; const int d2max = int_min(dmax-d1, j-1); #ifdef __HIPCC__ const int d2start = d1 > 0 ? threadIdx.y : threadIdx.y + 1; const int d2inc = blockDim.y; #else const int d2start = d1 > 0 ? 
0 : 1; const int d2inc = 1; #endif int d2; for (d2 = d2start; d2 <= d2max; d2 += d2inc) { const int jp = j-d2-1; if (can_pair(ip,jp,n,bcp)) free_energy_accumulate(&vij, internal_loop_energy(s,i,j,ip,jp,d1,d2,p) + v[ind(ip,jp,n)]); } } #ifdef __HIPCC__ const int tid = threadIdx.x * blockDim.y + threadIdx.y; free_energy_reduce(&vij, tid, blockDim.x*blockDim.y); if (tid != 0) continue; #endif free_energy_accumulate(&v[ind(i,j,n)], vij); } } DEV static real_t coaxial_flush(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->coaxial[s[i]][s[j]][s[ip]][s[jp]]; } DEV static real_t coaxial_mismatch1(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->tstackcoax[s[j]][s[i]][s[j+1]][s[i-1]] + p->coaxstack[s[j+1]][s[i-1]][s[ip]][s[jp]]; } DEV static real_t coaxial_mismatch2(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->tstackcoax[s[jp]][s[ip]][s[jp+1]][s[ip-1]] + p->coaxstack[s[j]][s[i]][s[j+1]][s[jp+1]]; } GLOBAL static void calc_coaxial (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const real_t *__restrict y, const real_t *__restrict w5, const real_t *__restrict w3, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp)) continue; const real_t *v1; real_t vij = INF; /* exterior */ if (is_exterior(i,j)) { int k, kstart; #ifdef __HIPCC__ kstart = threadIdx.x; const int kinc = blockDim.x; #else kstart = 0; const int kinc = 1; #endif for (k = kstart; k < j - LOOP_MIN; k += kinc) { if ((v1 = array_val(v,k,j-1,n,bcp))) free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_flush(s,k,j-1,j,i,p) + (*v1)); if (j-2 >= 0) { if (i < n-1 && (v1 = array_val(v,k,j-2,n,bcp))) free_energy_accumulate(&vij, w3[i+2] + w5[k-1] + coaxial_mismatch2(s,k,j-2,j,i,p) + (*v1)); if ((v1 = array_val(v,k+1,j-2,n,bcp))) free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_mismatch1(s,k+1,j-2,j,i,p) + (*v1)); } } #ifdef __HIPCC__ kstart = i+LOOP_MIN+1 + threadIdx.x; #else kstart = i+LOOP_MIN+1; #endif for (k = kstart; k < n; k += kinc) { if ((v1 = array_val(v,i+1,k,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_flush(s,j,i,i+1,k,p) + (*v1)); if (j > 0 && (v1 = array_val(v,i+2,k,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-2] + coaxial_mismatch1(s,j,i,i+2,k,p) + (*v1)); if ((v1 = array_val(v,i+2,k-1,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_mismatch2(s,j,i,i+2,k-1,p) + (*v1)); } } /* end exterior */ /* multibranch */ if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) { int ktmp; #ifdef __HIPCC__ int ktmpstart = i+2 + threadIdx.x; const int ktmpinc = blockDim.x; #else int ktmpstart = i+2; const int ktmpinc = 1; #endif for (ktmp = ktmpstart; ktmp < jtmp-2; ktmp += ktmpinc) { const int k = wrap(ktmp,n); if (k != n-1) { if ((v1 = array_val(v,i+1,k,n,bcp))) free_energy_accumulate(&vij, coaxial_flush(s,j,i,i+1,k,p) + (*v1) + p->a_2c + y[ind(k+1,j-1,n)]); if (ktmp+2 < jtmp-1 && i+1 != n-1 && k+1 != n-1 && (v1 = array_val(v,i+2,k,n,bcp))) { const real_t tmp = (*v1) + p->a_2b_2c; free_energy_accumulate(&vij, 
coaxial_mismatch2(s,j,i,i+2,k,p) + tmp + y[ind(k+2,j-1,n)]); if (j != 1) { free_energy_accumulate(&vij, coaxial_mismatch1(s,j,i,i+2,k,p) + tmp + y[ind(k+1,j-2,n)]); } } } } #ifdef __HIPCC__ ktmpstart = i+3 + threadIdx.x; #else ktmpstart = i+3; #endif for (ktmp = ktmpstart; ktmp < jtmp-1; ktmp += ktmpinc) { const int k = wrap(ktmp,n); if (k != 0) { if ((v1 = array_val(v,k,j-1,n,bcp))) free_energy_accumulate(&vij, coaxial_flush(s,k,j-1,j,i,p) + (*v1) + p->a_2c + y[ind(i+1,k-1,n)]); if (j != 1 && ktmp > i+3 && (v1 = array_val(v,k,j-2,n,bcp))) { const real_t tmp = (*v1) + p->a_2b_2c; if (k != 1) free_energy_accumulate(&vij, coaxial_mismatch1(s,k,j-2,j,i,p) + tmp + y[ind(i+1,k-2,n)]); if (i != n-2) free_energy_accumulate(&vij, coaxial_mismatch2(s,k,j-2,j,i,p) + tmp + y[ind(i+2,k-1,n)]); } } } } /* end multibranch */ #ifdef __HIPCC__ free_energy_reduce(&vij, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif free_energy_accumulate(&v[ind(i,j,n)], vij); } /* end loop over i */ } /* end calc_coaxial */ /*** * For arrays w, wl, xl, two diagonals are stored. * Element i of the current diagonal - that is, w(i,j) - * is referenced as w[d%2][i]. * Element i of the previous diagonal - that is, w(i,j-1) - * is referenced as w((d-1)%2,i) * * For array x, five diagonals are stored. * Similarly to w, x[ind(d%5,i,n)] refers to element i on * the current diagonal, and x[ind((d-k)%5,i,n)] to element i * on a previous diagonal d-k. * Specifically: * * x(i,j) --> x(d%5,i,n) * x(i+1,j,n) --> x((d-1)%5,i+1) * x(i+1,j-1,n) --> x((d-2)%5,i+1) * x(i+2,j-1,n) --> x((d-3)%5,i+1) * x(i+1,j-2,n) --> x((d-3)%5,i+1) * x(i+2,j-2,n) --> x((d-4)%5,i+1) ***/ GLOBAL static void calc_wl (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, real_t *__restrict z, real_t *__restrict wq, real_t *__restrict w, real_t *__restrict wl, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; real_t wqtmp = INF, wltmp = INF; const real_t *v1; if ((v1 = array_val(v,i,j,n,bcp))) { const real_t tmp = (*v1) + terminal_U_penalty(s,i,j,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->c); } if (i != n-1 && (v1 = array_val(v,i+1,j,n,bcp))) { const real_t tmp = (*v1) + dangle_5p_energy(s,j,i+1,i,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->b + p->c); } if (j != 0 && (v1 = array_val(v,i,j-1,n,bcp))) { const real_t tmp = (*v1) + dangle_3p_energy(s,j-1,i,j,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->b + p->c); } if (i != n-1 && j != 0 && (v1 = array_val(v,i+1,j-1,n,bcp))) { const real_t tmp = (*v1) + terminal_stack_multibranch(s,j-1,i+1,j,i,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + 2*p->b + p->c); } if (is_interior(i,j)) wq[upper_triangle_index(i,j)] = wqtmp; /* WL array */ wl[ind(d%2,i,n)] = z[ind(i,j,n)] = wltmp; if (i != n-1 && d > 0) free_energy_accumulate(&wl[ind(d%2,i,n)], wl[ind((d-1)%2,i+1,n)] + p->b); /* W array */ w[ind(d%2,i,n)] = wl[ind(d%2,i,n)]; if (j != 0 && d > 0) free_energy_accumulate(&w[ind(d%2,i,n)], w[ind((d-1)%2,i,n)] + p->b); } /* end loop over i */ } /* end calc_wl */ GLOBAL static void calc_xl (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const real_t *__restrict z, const real_t *__restrict yl, real_t 
*__restrict xl) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; #ifdef __HIPCC__ if (threadIdx.x == 0) xl[ind(d%2,i,n)] = INF; #else xl[ind(d%2,i,n)] = INF; #endif if (is_interior(i,j) && d <= 2*LOOP_MIN+1) continue; #ifdef __HIPCC__ const int kstart = i+1 + threadIdx.x; const int kinc = blockDim.x; #else const int kstart = i+1; const int kinc = 1; #endif int ktmp; real_t tmp = INF; for (ktmp = kstart; ktmp < jtmp-1; ktmp += kinc) { if (ktmp != n-1) { const int k = wrap(ktmp,n); free_energy_accumulate(&tmp, z[ind(i,k,n)] + yl[ind(k+1,j,n)]); } } #ifdef __HIPCC__ free_energy_reduce(&tmp, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif free_energy_accumulate(&xl[ind(d%2,i,n)], tmp); } /* end loop over i */ } /* end calc_xl */ GLOBAL static void calc_z (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, real_t *__restrict z, real_t *__restrict xl, real_t *__restrict wq, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || (is_interior(i,j) && d <= 2*LOOP_MIN+1)) continue; #ifdef __HIPCC__ const int kstart = i+LOOP_MIN+1 + threadIdx.x; const int kinc = blockDim.x; #else const int kstart = i+LOOP_MIN+1; const int kinc = 1; #endif int ktmp; real_t tmp1 = INF, tmp2 = INF; for (ktmp = kstart; ktmp < jtmp-LOOP_MIN-1; ktmp += kinc) { const int k = wrap(ktmp,n); if (k == n-1) continue; real_t *v1, *v2; if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+1,j,n,bcp))) free_energy_accumulate(&tmp1, (*v1) + (*v2) + coaxial_flush(s,i,k,k+1,j,p)); if (j == 0 || k+1 == n-1) continue; if (i != n-1 && (v1 = array_val(v,i+1,k,n,bcp)) && (v2 = array_val(v,k+2,j,n,bcp))) free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch1(s,i+1,k,k+2,j,p)); if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+2,j-1,n,bcp))) free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch2(s,i,k,k+2,j-1,p)); } #ifdef __HIPCC__ free_energy_reduce(&tmp1, threadIdx.x, blockDim.x); free_energy_reduce(&tmp2, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif if (is_interior(i,j)) free_energy_accumulate(&wq[upper_triangle_index(i,j)], free_energy_sum(tmp1,tmp2)); const real_t wcoax = free_energy_sum(tmp1 + 2*p->c, tmp2 + 2*p->b + 2*p->c); free_energy_accumulate(&z[ind(i,j,n)], wcoax); free_energy_accumulate(&xl[ind(d%2,i,n)], wcoax); } /* end loop over i */ } /* end calc_z */ GLOBAL static void calc_x (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, real_t *__restrict yl, real_t *__restrict y, const real_t *__restrict w, const real_t *__restrict wl, real_t *__restrict xl, real_t *__restrict x, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; x[ind(d%5,i,n)] = INF; if (d > 2*LOOP_MIN+1 || is_exterior(i,j)) { if (i != n-1) free_energy_accumulate(&xl[ind(d%2,i,n)], xl[ind((d-1)%2,i+1,n)] + p->b); /* x array */ x[ind(d%5,i,n)] = xl[ind(d%2,i,n)]; if (j != 0) free_energy_accumulate(&x[ind(d%5,i,n)], x[ind((d-1)%5,i,n)] + p->b); } yl[ind(i,j,n)] = free_energy_sum(wl[ind(d%2,i,n)], xl[ind(d%2,i,n)]); y[ind(i,j,n)] = free_energy_sum(w[ind(d%2,i,n)], x[ind(d%5,i,n)]); } /* end loop over i */ } /* end calc_x */ GLOBAL static 
void init_w5_and_w3(int n, real_t *w5, real_t *w3) { w5[-1] = w5[0] = w3[n-1] = w3[n] = 0; } GLOBAL static void calc_w5_and_w3( const int d, const int n, real_t *__restrict w5, real_t *__restrict w3, const real_t *__restrict wq) { #ifdef __HIPCC__ const int istart = threadIdx.x; const int iinc = blockDim.x; #else const int istart = 0; const int iinc = 1; #endif real_t w5tmp = INF, w3tmp = INF; int i; for (i = istart; i + LOOP_MIN <= d; i += iinc) { free_energy_accumulate(&w5tmp, w5[i-1] + wq[upper_triangle_index(i,d+1)]); free_energy_accumulate(&w3tmp, w3[n-i] + wq[upper_triangle_index(n-d-2,n-i-1)]); } #ifdef __HIPCC__ free_energy_reduce(&w5tmp, threadIdx.x, blockDim.x); free_energy_reduce(&w3tmp, threadIdx.x, blockDim.x); if (threadIdx.x != 0) return; #endif w5[d+1] = w5[d]; w3[n-d-2] = w3[n-d-1]; free_energy_accumulate(&w5[d+1], w5tmp); free_energy_accumulate(&w3[n-d-2], w3tmp); } /* end calc_w5_and_w3 */ prna_t prna_new(const char *s, param_t par, int quiet, int *base_cp) { prna_t p = (prna_t) safe_malloc(sizeof(struct prna)); memset(p, 0, sizeof(struct prna)); const int n = p->n = strlen(s); printf("sequence length = %d\n", n); p->seq = (base_t *) safe_malloc(n*sizeof(base_t)); p->base_can_pair = base_cp; sequence_from_string(p->seq, s); p->v = (real_t *) safe_malloc(n*n*sizeof(real_t)); p->w5 = (real_t *) safe_malloc((n+1)*sizeof(real_t)) + 1; p->w3 = (real_t *) safe_malloc((n+1)*sizeof(real_t)); real_t *z, *yl, *y, *wq, *w, *wl, *xl, *x; #ifdef __HIPCC__ /* do multithreaded fill on GPU */ printf("Performing Calculation on GPU\n"); real_t *v, *w5, *w3; #define ALLOC(a,sz) CU(hipMalloc(&a,(sz)*sizeof(real_t))) ALLOC(v,n*n); ALLOC(w5,n+1); w5++; ALLOC(w3,n+1); ALLOC(z,n*n); ALLOC(yl,n*n); ALLOC(y,n*n); ALLOC(wq,n*(n-1)/2); ALLOC(w,2*n); ALLOC(wl,2*n); ALLOC(xl,2*n); ALLOC(x,5*n); param_t dev_par; CU(hipMalloc(&dev_par, sizeof(struct param))); CU(hipMemcpy(dev_par, par, sizeof(struct param), hipMemcpyHostToDevice)); base_t *dev_s; CU(hipMalloc(&dev_s,n*sizeof(base_t))); CU(hipMemcpy(dev_s, p->seq, n*sizeof(base_t), hipMemcpyHostToDevice)); int *dev_bcp; CU(hipMalloc(&dev_bcp,(n*(n-1)/2)*sizeof(int))); CU(hipMemcpy(dev_bcp, p->base_can_pair, (n*(n-1)/2)*sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( init_w5_and_w3), dim3(1),dim3(1), 0, 0, n,w5,w3); for (int d = 0; d < n-1; d++) { hipLaunchKernelGGL(( calc_hairpin_stack_exterior_multibranch), dim3(n),dim3(1), 0, 0, d, n, dev_s, dev_bcp, v, x, w5, w3, dev_par); hipLaunchKernelGGL(( calc_internal), dim3(n),dim3(dim3(THREAD_X,THREAD_Y,1)), 0, 0, d, n, dev_s, dev_bcp, v, dev_par); hipLaunchKernelGGL(( calc_coaxial), dim3(n),dim3(NTHREAD), 0, 0, d, n, dev_s, dev_bcp, v, y, w5, w3, dev_par); hipLaunchKernelGGL(( calc_wl), dim3(n),dim3(1), 0, 0, d, n, dev_s, dev_bcp, v, z, wq, w, wl, dev_par); hipLaunchKernelGGL(( calc_xl), dim3(n),dim3(NTHREAD), 0, 0, d, n, z, yl, xl); hipLaunchKernelGGL(( calc_z), dim3(n),dim3(NTHREAD), 0, 0, d, n, dev_s, dev_bcp, v, z, xl, wq, dev_par); hipLaunchKernelGGL(( calc_x), dim3(n),dim3(1), 0, 0, d, n, yl, y, w, wl, xl, x, dev_par); hipLaunchKernelGGL(( calc_w5_and_w3), dim3(1),dim3(NTHREAD), 0, 0, d, n, w5, w3, wq); } CU(hipMemcpy(p->v, v, n*n*sizeof(base_t), hipMemcpyDeviceToHost)); CU(hipMemcpy(p->w5 - 1, w5 - 1, (n+1)*sizeof(base_t), hipMemcpyDeviceToHost)); CU(hipMemcpy(p->w3, w3, (n+1)*sizeof(base_t), hipMemcpyDeviceToHost)); CU(hipFree(v)); CU(hipFree(w5 - 1)); CU(hipFree(w3)); CU(hipFree(z)); CU(hipFree(yl)); CU(hipFree(y)); CU(hipFree(wq)); CU(hipFree(w)); CU(hipFree(wl)); CU(hipFree(xl)); 
CU(hipFree(x)); CU(hipFree(dev_par)); CU(hipFree(dev_s)); CU(hipFree(dev_bcp)); #else /* do serial fill on CPU */ #define ALLOC(a,sz) a = (real_t *) safe_malloc((sz)*sizeof(real_t)) printf("Performing Calculations on CPU\n"); ALLOC(z,n*n); ALLOC(yl,n*n); ALLOC(y,n*n); ALLOC(wq,n*(n-1)/2); ALLOC(w,2*n); ALLOC(wl,2*n); ALLOC(xl,2*n); ALLOC(x,5*n); init_w5_and_w3(n,p->w5,p->w3); int d; for (d = 0; d < n-1; d++) { calc_hairpin_stack_exterior_multibranch(d, n, p->seq, p->base_can_pair, p->v, x, p->w5, p->w3, par); calc_internal(d, n, p->seq, p->base_can_pair, p->v, par); calc_coaxial(d, n, p->seq, p->base_can_pair, p->v, y, p->w5, p->w3, par); calc_wl(d, n, p->seq, p->base_can_pair, p->v, z, wq, w, wl, par); calc_xl(d, n, z, yl, xl); calc_z(d, n, p->seq, p->base_can_pair, p->v, z, xl, wq, par); calc_x(d, n, yl, y, w, wl, xl, x, par); calc_w5_and_w3(d, n, p->w5, p->w3, wq); } free(z); free(yl); free(y); free(wq); free(w); free(wl); free(xl); free(x); #endif /* __HIPCC__ */ return p; } /* end prna_new */ void prna_delete(prna_t p) { if (p) { if (p->seq) free(p->seq); if (p->v) free(p->v); if (p->w5 - 1) free(p->w5 - 1); if (p->w3) free(p->w3); free(p); } } #define SHOWARR(a) \ if (p->a) { \ int i, j; \ for (i = 0; i < n; i++) { \ printf("%s%4d: ",#a,i+1); \ for (j = 0; j < n; j++) { \ const real_t *aij = array_val(p->a,i,j,n,bcp); \ printf(RF" ", aij ? (*aij)*RT : INF); \ } \ printf("\n"); \ } \ } #define SHOW(a) \ if (p->a) { \ int i; \ printf("%s: ",#a); \ for (i = 0; i < n; i++) \ printf(RF" ", p->a[i] * RT); \ printf("\n"); \ } \ void prna_show(const prna_t p) { int i, n = p->n; const base_t *s = p->seq; const int *bcp = p->base_can_pair; printf("n: %d\n", n); printf("seq: "); for (i = 0; i < n; i++) printf("%c", base_as_char(s[i])); printf("\n"); SHOWARR(v); SHOW(w5); SHOW(w3); } static real_t free_energy_of_pair(const prna_t p, int i, int j) { const int n = p->n; //const base_t *s = p->seq; const int *bcp = p->base_can_pair; if (can_pair(i,j,n,bcp)){ return *array_val(p->v,i,j,n,bcp) + *array_val(p->v,j,i,n,bcp) - p->w3[0]; } else return INF; } real_t probability_of_pair(const prna_t p, int i, int j) { return exp(-free_energy_of_pair(p,i,j)); } real_t get_v_array(const prna_t p, int i, int j) { const int n = p->n; const int *bcp = p->base_can_pair; if (can_pair(i,j,n,bcp)){ return *array_val(p->v,i,j,n,bcp); } else return -INF; } real_t get_w3_array(const prna_t p, int i) { return p->w3[i]; } real_t get_w5_array(const prna_t p, int i) { return p->w5[i]; } void prna_write_neg_log10_probabilities(const prna_t p, const char *fn) { FILE *f = safe_fopen(fn,"w"); int i, j; fprintf(f,"%d\n%-8s%-8s-log10(probability)\n",p->n,"i","j"); for (i = 0; i < p->n; i++) for (j = i+1; j < p->n; j++) if (can_pair(i,j,p->n,p->base_can_pair)) fprintf(f,"%-8d%-8d" RF "\n", i+1, j+1, free_energy_of_pair(p,i,j)/LOG(10)); fclose(f); } void prna_write_probability_matrix(const prna_t p, const char *fn) { FILE *f = safe_fopen(fn,"w"); const int n = p->n; //const base_t *s = p->seq; const int *bcp = p->base_can_pair; int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) fprintf(f,RF" ", can_pair(i,j,n,bcp) ? probability_of_pair(p,i,j) : 0); fprintf(f,"\n"); } fclose(f); } static void write_ct_structure(FILE *f, const char *s, int n, const int *pair) { char fmt[256]; sprintf(fmt,"%d",n); int ns = strlen(fmt)+1; if (ns < 5) ns = 5; sprintf(fmt,"%%%dd",ns); int i; for (i = 0; i < n; i++) { fprintf(f,fmt,i+1); fprintf(f,"%2c ",s[i]); fprintf(f,fmt,i); fprintf(f,fmt,i == n-1 ? 0 : i+2); fprintf(f,fmt,pair[i] == i ? 
0 : pair[i]+1); fprintf(f,fmt,i+1); fprintf(f,"\n"); } } static void unpair(int *pair, int i) { const int j = pair[i]; pair[i] = i; pair[j] = j; } static int is_paired(const int *pair, int i) { return pair[i] != i; } static void remove_helices_shorter_than(int min_helix_length, int *pair, int n) { int i; for (i = 0; i < n-2; i++) { int j = pair[i]; if (j <= i) continue; int npair = 1; while (pair[i+1] == j-1 || pair[i+2] == j-1 || pair[i+1] == j-2) { if (pair[i+1] == j-1) ; else if (pair[i+2] == j-1) { if (is_paired(pair,i+1)) unpair(pair,i+1); i++; } else j--; i++; j--; npair++; } if (npair < min_helix_length) { unpair(pair,i); if (i >= 2) { while (pair[i-1] == j+1 || pair[i-2] == j+1 || pair[i-1] == j+2) { if (pair[i-1] == j+1) unpair(pair,i-1); else if (pair[i-2] == j+1) { unpair(pair,i-2); i--; } else { unpair(pair,i-1); j++; } i--; j++; } } else if (i == 1) { while (pair[i-1] == j+1 || pair[i-1] == j+2) { if (pair[i-1] == j+1) unpair(pair,i-1); else { unpair(pair,i-1); j++; } i--; j++; } } } } } /* end remove_helices_shorter_than */ void prna_write_probknot(const prna_t p, const char *fn, const char *s, int min_helix_length) { const int n = p->n; int *pair = (int *) safe_malloc(n*sizeof(int)); int i; for (i = 0; i < n; i++) { pair[i] = i; /* unpaired */ int j; for (j = 0; j < n; j++) if (free_energy_of_pair(p,i,j) < free_energy_of_pair(p,i,pair[i])) pair[i] = j; } for (i = 0; i < n; i++) if (pair[pair[i]] != i) pair[i] = i; /* unpaired */ if (min_helix_length > 1) remove_helices_shorter_than(min_helix_length,pair,n); /* write the structure */ if (fn) { FILE *f = safe_fopen(fn,"w"); write_ct_structure(f,s,n,pair); fclose(f); } else { write_ct_structure(stdout,s,n,pair); } free(pair); } int *generate_bcp(const char *s) { int length = strlen(s); int i, j; int *base_cp = (int *) safe_malloc((length*(length-1)/2)*sizeof(int)); base_t *seq = (base_t *) safe_malloc(length*sizeof(base_t)); sequence_from_string(seq, s); for (i=0; i<length; i++){ for (j=i+1; j<length; j++){ if ((j-i < LOOP_MIN+1) || !isupper(s[i]) || !isupper(s[j])){ base_cp[(j*(j-1))/2 + i]=0; } else{ base_cp[upper_triangle_index(i,j)]=is_canonical_pair(seq[i],seq[j]) && ((i > 0 && j < length-1 && is_canonical_pair(seq[i-1],seq[j+1])) || (j-i>=LOOP_MIN+3 && is_canonical_pair(seq[i+1],seq[j-1]))); } } } return base_cp; }
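The fill kernels in this file accumulate partition-function contributions with free_energy_sum, i.e. -ln(e^-a + e^-b), written so that the smaller free energy is factored out before exponentiating. Below is a standalone host check of that identity; it is a sketch which assumes LOG, LOG1P and EXP resolve to the standard double-precision log, log1p and exp (the project's real_t and macros live in its own headers), and the sample values are arbitrary:

/* Sketch: host check of the identity behind free_energy_sum, i.e.
 * -ln(e^-a + e^-b) evaluated by factoring out the smaller free energy.
 * Assumes LOG/LOG1P/EXP correspond to the standard double functions. */
#include <math.h>
#include <stdio.h>

static double free_energy_sum_ref(double a, double b) {
  if (a < b)      return a - log1p(exp(a - b));
  else if (b < a) return b - log1p(exp(b - a));
  else            return a - log(2);
}

int main(void) {
  double a = 3.2, b = 5.9;                              /* arbitrary values */
  printf("stable = %.12f\n", free_energy_sum_ref(a, b));
  printf("naive  = %.12f\n", -log(exp(-a) + exp(-b)));  /* agrees at this scale */
  /* The naive form underflows to +inf for large free energies;
   * the factored form does not. */
  printf("large case = %.6f\n", free_energy_sum_ref(700.0, 710.0));
  return 0;
}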
f065103be316f3c5fa276ba5b112b0a8c2da085e.cu
#include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <ctype.h> #include "cu.h" #include "base.h" #include "prna.h" #include "util.h" #include "param.h" /* penalty for a helix terminated by a pair containing a U */ DEV static real_t terminal_U_penalty(const base_t *s, const int i, const int j, param_t p) { return s[i] == U || s[j] == U ? p->terminal_AU_penalty : RCONST(0.); } DEV static real_t dangle_3p_energy(const base_t *s, const int i, const int j, const int ip1, param_t p) { return p->dangle_3p[s[i]][s[j]][s[ip1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t dangle_5p_energy(const base_t *s, const int i, const int j, const int jm1, param_t p) { return p->dangle_5p[s[i]][s[j]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t terminal_stack(const base_t *s, const int i, const int j, const int ip1, const int jm1, param_t p) { return p->tstack[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static real_t terminal_stack_multibranch(const base_t *s, const int i, const int j, const int ip1, const int jm1, param_t p) { return p->tstackm[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p); } DEV static const real_t *lookup_find(const base_t *s, const int d, param_t p) { int i; switch (d) { case 3: for (i = 0; i < p->ntriloop; i++) if (sequences_match(s, p->triloop[i].seq, d+2)) return &p->triloop[i].val; break; case 4: for (i = 0; i < p->ntloop; i++) if (sequences_match(s, p->tloop[i].seq, d+2)) return &p->tloop[i].val; break; case 6: for (i = 0; i < p->nhexaloop; i++) if (sequences_match(s, p->hexaloop[i].seq, d+2)) return &p->hexaloop[i].val; break; } return 0; } /*** * Energy of a hairpin loop with d unpaired bases, d = j-i-1 * s[i] is paired with s[j] * s[i+1] is mismatched with s[j-1] ***/ DEV static real_t hairpin_loop_energy(const base_t *s, const int i, const int j, const int d, param_t p) { /* Lookup tables for special hairpin loops */ const real_t *val; if ((val = lookup_find(&s[i],d,p))) return *val; /* Hairpin loop initiation penalty */ real_t e; if (d > LOOP_MAX) e = p->hairpin_loop_initiation[LOOP_MAX] + p->Extrapolation_for_large_loops * LOG((real_t) d / LOOP_MAX); else e = p->hairpin_loop_initiation[d]; if (d == 3) { if (contains_only_base(C,d,&s[i+1])) e += p->c_hairpin_of_3; e += terminal_U_penalty(s,i,j,p); } else { e += p->tstackh[s[i]][s[j]][s[i+1]][s[j-1]]; if (contains_only_base(C,d,&s[i+1])) e += p->c_hairpin_slope*d + p->c_hairpin_intercept; } if (s[i] == G && s[j] == U && i > 1 && s[i-1] == G && s[i-2] == G) e += p->bonus_for_GGG_hairpin; return e; } DEV static real_t real_min(real_t a, real_t b) { return a < b ? 
a : b; } /*** * Energy of an internal/bulge loop with d1, d2 unpaired bases, * d1 = ip-i-1, d2 = j-jp-1 * s[i] is paired with s[j] * s[i+1] is mismatched sith s[j-1] * s[ip-1] is mismatched with s[jp+1] * s[ip] is paired with s[jp] ***/ DEV static real_t internal_loop_energy(const base_t *s, const int i, const int j, const int ip, const int jp, const int d1, const int d2, param_t p) { /* Bulge loops */ if (d1 == 0 || d2 == 0) { real_t e = p->bulge_loop_initiation[d1+d2]; if (d1 == 1 || d2 == 1) { /* single-nucleotide bulge */ e += p->stack[s[i]][s[j]][s[ip]][s[jp]]; if ((d1 == 1 && s[i+1] == C && (s[i] == C || s[i+2] == C)) || (d2 == 1 && s[j-1] == C && (s[j] == C || s[j-2] == C))) e += p->Bonus_for_Single_C_bulges_adjacent_to_C; } else { e += terminal_U_penalty(s,i,j,p); e += terminal_U_penalty(s,ip,jp,p); } return e; } /* Small internal loops */ if (d1 == 1 && d2 == 1) return p->int11[s[i]][s[i+1]][s[i+2]][s[j-2]][s[j-1]][s[j]]; if (d1 == 2 && d2 == 2) return p->int22[s[i]][s[ip]][s[j]][s[jp]][s[i+1]][s[i+2]][s[j-1]][s[j-2]]; if (d1 == 1 && d2 == 2) return p->int21[s[i]][s[j]][s[i+1]][s[j-1]][s[jp+1]][s[ip]][s[jp]]; if (d1 == 2 && d2 == 1) return p->int21[s[jp]][s[ip]][s[jp+1]][s[ip-1]][s[i+1]][s[j]][s[i]]; /* Larger internal loops */ tab4_t *sp; if (d1 == 1 || d2 == 1) sp = &p->tstacki1n; else if ((d1 == 2 && d2 == 3) || (d1 == 3 && d2 == 2)) sp = &p->tstacki23; else sp = &p->tstacki; return p->internal_loop_initiation[d1+d2] + real_min(p->fm_array_first_element * abs(d1-d2), p->maximum_correction) + (*sp)[s[i]][s[j]][s[i+1]][s[j-1]] + (*sp)[s[jp]][s[ip]][s[jp+1]][s[ip-1]]; } /* return -ln(e^-a + e^-b) */ DEV static real_t free_energy_sum(const real_t a, const real_t b) { if (a < b) return a - LOG1P(EXP(a-b)); else if (b < a) return b - LOG1P(EXP(b-a)); else return a - LOG(2); } DEV static void free_energy_accumulate(real_t *a, const real_t b) { *a = free_energy_sum(*a,b); } DEV HOST static int int_min(int a, int b) { return a < b ? a : b; } DEV HOST static int ind(int i, int j, int n) { return i*n + j; } DEV HOST static int upper_triangle_index(int i, int j) { return (j*(j-1))/2 + i; } DEV HOST inline static int can_pair(int i, int j, int n, const int *bcp) { if (i>=0 && j<=n-1 && i != j && j>=0 && i<=n-1){ if (i < j) return bcp[upper_triangle_index(i, j)]; else return bcp[upper_triangle_index(j, i)]; } else return 0; } DEV static int wrap(int i, int n) { return i >= n ? i-n : i; } DEV static int is_exterior(int i, int j) { return j < i; } DEV static int is_interior(int i, int j) { return i < j; } DEV HOST real_t* array_val(real_t *__restrict a, int i, int j, int n, const int *__restrict bcp) { return can_pair(i,j,n,bcp) ? 
&a[ind(i,j,n)] : 0; } #ifdef __CUDACC__ #define ISTART blockIdx.x #define IINC gridDim.x #else #define ISTART 0 #define IINC 1 #endif GLOBAL static void calc_hairpin_stack_exterior_multibranch (const int d, const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const real_t *__restrict x, const real_t *__restrict w5, const real_t *__restrict w3, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp)) continue; real_t vij = INF; if (i != n-1 && j != 0) { /* hairpin loop */ if (is_interior(i,j)) vij = hairpin_loop_energy(s,i,j,d,p); /* stack */ if (can_pair(i+1,j-1,n,bcp) && !(is_interior(i,j) && d <= LOOP_MIN-2)) free_energy_accumulate(&vij, p->stack[s[i]][s[j]][s[i+1]][s[j-1]] + v[ind(i+1,j-1,n)]); } /* exterior loop */ if (is_exterior(i,j)) { free_energy_accumulate(&vij, w3[i+1] + w5[j-1] + terminal_U_penalty(s,i,j,p)); if (i != n-1) free_energy_accumulate(&vij, w3[i+2] + w5[j-1] + dangle_3p_energy(s,i,j,i+1,p)); if (j != 0) free_energy_accumulate(&vij, w3[i+1] + w5[j-2] + dangle_5p_energy(s,i,j,j-1,p)); if (i != n-1 && j != 0) free_energy_accumulate(&vij, w3[i+2] + w5[j-2] + terminal_stack(s,i,j,i+1,j-1,p)); } /* multibranch loop */ if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) { free_energy_accumulate(&vij, x[ind((d-2)%5,i+1,n)] + terminal_U_penalty(s,i,j,p) + p->a + p->c); if (i != n-2) free_energy_accumulate(&vij, x[ind((d-3)%5,i+2,n)] + dangle_3p_energy(s,i,j,i+1,p) + p->a + p->b + p->c); if (j != 1) free_energy_accumulate(&vij, x[ind((d-3)%5,i+1,n)] + dangle_5p_energy(s,i,j,j-1,p) + p->a + p->b + p->c); if (i != n-2 && j != 1) free_energy_accumulate(&vij, x[ind((d-4)%5,i+2,n)] + terminal_stack_multibranch(s,i,j,i+1,j-1,p) + p->a + 2*p->b + p->c); } v[ind(i,j,n)] = vij; } } #ifdef __CUDACC__ #define NTHREAD 128 #define THREAD_X 8 #define THREAD_Y 16 #if THREAD_X*THREAD_Y != NTHREAD #error THREAD_X * THREAD_Y must be equal to NTHREAD #endif DEV static void free_energy_reduce(real_t *x, int tid, int nt) { __shared__ real_t buf[NTHREAD]; buf[tid] = *x; for (nt /= 2, __syncthreads(); nt > 0; nt /= 2, __syncthreads()) if (tid < nt) free_energy_accumulate(&buf[tid], buf[tid+nt]); if (tid == 0) *x = buf[0]; } #endif /* __CUDACC__ */ GLOBAL static void calc_internal (const int d, const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || (is_interior(i,j) && d <= LOOP_MIN+2) || !can_pair(i,j,n,bcp)) continue; real_t vij = INF; #ifdef __CUDACC__ const int d1start = threadIdx.x; const int d1inc = blockDim.x; #else const int d1start = 0; const int d1inc = 1; #endif const int dmax = int_min(LOOP_MAX, d-2); const int d1max = int_min(dmax, n-i-2); int d1; for (d1 = d1start; d1 <= d1max; d1 += d1inc) { const int ip = i+d1+1; const int d2max = int_min(dmax-d1, j-1); #ifdef __CUDACC__ const int d2start = d1 > 0 ? threadIdx.y : threadIdx.y + 1; const int d2inc = blockDim.y; #else const int d2start = d1 > 0 ? 
0 : 1; const int d2inc = 1; #endif int d2; for (d2 = d2start; d2 <= d2max; d2 += d2inc) { const int jp = j-d2-1; if (can_pair(ip,jp,n,bcp)) free_energy_accumulate(&vij, internal_loop_energy(s,i,j,ip,jp,d1,d2,p) + v[ind(ip,jp,n)]); } } #ifdef __CUDACC__ const int tid = threadIdx.x * blockDim.y + threadIdx.y; free_energy_reduce(&vij, tid, blockDim.x*blockDim.y); if (tid != 0) continue; #endif free_energy_accumulate(&v[ind(i,j,n)], vij); } } DEV static real_t coaxial_flush(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->coaxial[s[i]][s[j]][s[ip]][s[jp]]; } DEV static real_t coaxial_mismatch1(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->tstackcoax[s[j]][s[i]][s[j+1]][s[i-1]] + p->coaxstack[s[j+1]][s[i-1]][s[ip]][s[jp]]; } DEV static real_t coaxial_mismatch2(const base_t *s, const int i, const int j, const int ip, const int jp, param_t p) { return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) + p->tstackcoax[s[jp]][s[ip]][s[jp+1]][s[ip-1]] + p->coaxstack[s[j]][s[i]][s[j+1]][s[jp+1]]; } GLOBAL static void calc_coaxial (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, const real_t *__restrict y, const real_t *__restrict w5, const real_t *__restrict w3, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp)) continue; const real_t *v1; real_t vij = INF; /* exterior */ if (is_exterior(i,j)) { int k, kstart; #ifdef __CUDACC__ kstart = threadIdx.x; const int kinc = blockDim.x; #else kstart = 0; const int kinc = 1; #endif for (k = kstart; k < j - LOOP_MIN; k += kinc) { if ((v1 = array_val(v,k,j-1,n,bcp))) free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_flush(s,k,j-1,j,i,p) + (*v1)); if (j-2 >= 0) { if (i < n-1 && (v1 = array_val(v,k,j-2,n,bcp))) free_energy_accumulate(&vij, w3[i+2] + w5[k-1] + coaxial_mismatch2(s,k,j-2,j,i,p) + (*v1)); if ((v1 = array_val(v,k+1,j-2,n,bcp))) free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_mismatch1(s,k+1,j-2,j,i,p) + (*v1)); } } #ifdef __CUDACC__ kstart = i+LOOP_MIN+1 + threadIdx.x; #else kstart = i+LOOP_MIN+1; #endif for (k = kstart; k < n; k += kinc) { if ((v1 = array_val(v,i+1,k,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_flush(s,j,i,i+1,k,p) + (*v1)); if (j > 0 && (v1 = array_val(v,i+2,k,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-2] + coaxial_mismatch1(s,j,i,i+2,k,p) + (*v1)); if ((v1 = array_val(v,i+2,k-1,n,bcp))) free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_mismatch2(s,j,i,i+2,k-1,p) + (*v1)); } } /* end exterior */ /* multibranch */ if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) { int ktmp; #ifdef __CUDACC__ int ktmpstart = i+2 + threadIdx.x; const int ktmpinc = blockDim.x; #else int ktmpstart = i+2; const int ktmpinc = 1; #endif for (ktmp = ktmpstart; ktmp < jtmp-2; ktmp += ktmpinc) { const int k = wrap(ktmp,n); if (k != n-1) { if ((v1 = array_val(v,i+1,k,n,bcp))) free_energy_accumulate(&vij, coaxial_flush(s,j,i,i+1,k,p) + (*v1) + p->a_2c + y[ind(k+1,j-1,n)]); if (ktmp+2 < jtmp-1 && i+1 != n-1 && k+1 != n-1 && (v1 = array_val(v,i+2,k,n,bcp))) { const real_t tmp = (*v1) + p->a_2b_2c; free_energy_accumulate(&vij, 
coaxial_mismatch2(s,j,i,i+2,k,p) + tmp + y[ind(k+2,j-1,n)]); if (j != 1) { free_energy_accumulate(&vij, coaxial_mismatch1(s,j,i,i+2,k,p) + tmp + y[ind(k+1,j-2,n)]); } } } } #ifdef __CUDACC__ ktmpstart = i+3 + threadIdx.x; #else ktmpstart = i+3; #endif for (ktmp = ktmpstart; ktmp < jtmp-1; ktmp += ktmpinc) { const int k = wrap(ktmp,n); if (k != 0) { if ((v1 = array_val(v,k,j-1,n,bcp))) free_energy_accumulate(&vij, coaxial_flush(s,k,j-1,j,i,p) + (*v1) + p->a_2c + y[ind(i+1,k-1,n)]); if (j != 1 && ktmp > i+3 && (v1 = array_val(v,k,j-2,n,bcp))) { const real_t tmp = (*v1) + p->a_2b_2c; if (k != 1) free_energy_accumulate(&vij, coaxial_mismatch1(s,k,j-2,j,i,p) + tmp + y[ind(i+1,k-2,n)]); if (i != n-2) free_energy_accumulate(&vij, coaxial_mismatch2(s,k,j-2,j,i,p) + tmp + y[ind(i+2,k-1,n)]); } } } } /* end multibranch */ #ifdef __CUDACC__ free_energy_reduce(&vij, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif free_energy_accumulate(&v[ind(i,j,n)], vij); } /* end loop over i */ } /* end calc_coaxial */ /*** * For arrays w, wl, xl, two diagonals are stored. * Element i of the current diagonal - that is, w(i,j) - * is referenced as w[d%2][i]. * Element i of the previous diagonal - that is, w(i,j-1) - * is referenced as w((d-1)%2,i) * * For array x, five diagonals are stored. * Similarly to w, x[ind(d%5,i,n)] refers to element i on * the current diagonal, and x[ind((d-k)%5,i,n)] to element i * on a previous diagonal d-k. * Specifically: * * x(i,j) --> x(d%5,i,n) * x(i+1,j,n) --> x((d-1)%5,i+1) * x(i+1,j-1,n) --> x((d-2)%5,i+1) * x(i+2,j-1,n) --> x((d-3)%5,i+1) * x(i+1,j-2,n) --> x((d-3)%5,i+1) * x(i+2,j-2,n) --> x((d-4)%5,i+1) ***/ GLOBAL static void calc_wl (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, real_t *__restrict z, real_t *__restrict wq, real_t *__restrict w, real_t *__restrict wl, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; real_t wqtmp = INF, wltmp = INF; const real_t *v1; if ((v1 = array_val(v,i,j,n,bcp))) { const real_t tmp = (*v1) + terminal_U_penalty(s,i,j,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->c); } if (i != n-1 && (v1 = array_val(v,i+1,j,n,bcp))) { const real_t tmp = (*v1) + dangle_5p_energy(s,j,i+1,i,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->b + p->c); } if (j != 0 && (v1 = array_val(v,i,j-1,n,bcp))) { const real_t tmp = (*v1) + dangle_3p_energy(s,j-1,i,j,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + p->b + p->c); } if (i != n-1 && j != 0 && (v1 = array_val(v,i+1,j-1,n,bcp))) { const real_t tmp = (*v1) + terminal_stack_multibranch(s,j-1,i+1,j,i,p); free_energy_accumulate(&wqtmp, tmp); free_energy_accumulate(&wltmp, tmp + 2*p->b + p->c); } if (is_interior(i,j)) wq[upper_triangle_index(i,j)] = wqtmp; /* WL array */ wl[ind(d%2,i,n)] = z[ind(i,j,n)] = wltmp; if (i != n-1 && d > 0) free_energy_accumulate(&wl[ind(d%2,i,n)], wl[ind((d-1)%2,i+1,n)] + p->b); /* W array */ w[ind(d%2,i,n)] = wl[ind(d%2,i,n)]; if (j != 0 && d > 0) free_energy_accumulate(&w[ind(d%2,i,n)], w[ind((d-1)%2,i,n)] + p->b); } /* end loop over i */ } /* end calc_wl */ GLOBAL static void calc_xl (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const real_t *__restrict z, const real_t *__restrict yl, real_t 
*__restrict xl) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; #ifdef __CUDACC__ if (threadIdx.x == 0) xl[ind(d%2,i,n)] = INF; #else xl[ind(d%2,i,n)] = INF; #endif if (is_interior(i,j) && d <= 2*LOOP_MIN+1) continue; #ifdef __CUDACC__ const int kstart = i+1 + threadIdx.x; const int kinc = blockDim.x; #else const int kstart = i+1; const int kinc = 1; #endif int ktmp; real_t tmp = INF; for (ktmp = kstart; ktmp < jtmp-1; ktmp += kinc) { if (ktmp != n-1) { const int k = wrap(ktmp,n); free_energy_accumulate(&tmp, z[ind(i,k,n)] + yl[ind(k+1,j,n)]); } } #ifdef __CUDACC__ free_energy_reduce(&tmp, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif free_energy_accumulate(&xl[ind(d%2,i,n)], tmp); } /* end loop over i */ } /* end calc_xl */ GLOBAL static void calc_z (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, const base_t *__restrict s, const int *__restrict bcp, real_t *__restrict v, real_t *__restrict z, real_t *__restrict xl, real_t *__restrict wq, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if ((is_exterior(i,j) && i-j <= LOOP_MIN) || (is_interior(i,j) && d <= 2*LOOP_MIN+1)) continue; #ifdef __CUDACC__ const int kstart = i+LOOP_MIN+1 + threadIdx.x; const int kinc = blockDim.x; #else const int kstart = i+LOOP_MIN+1; const int kinc = 1; #endif int ktmp; real_t tmp1 = INF, tmp2 = INF; for (ktmp = kstart; ktmp < jtmp-LOOP_MIN-1; ktmp += kinc) { const int k = wrap(ktmp,n); if (k == n-1) continue; real_t *v1, *v2; if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+1,j,n,bcp))) free_energy_accumulate(&tmp1, (*v1) + (*v2) + coaxial_flush(s,i,k,k+1,j,p)); if (j == 0 || k+1 == n-1) continue; if (i != n-1 && (v1 = array_val(v,i+1,k,n,bcp)) && (v2 = array_val(v,k+2,j,n,bcp))) free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch1(s,i+1,k,k+2,j,p)); if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+2,j-1,n,bcp))) free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch2(s,i,k,k+2,j-1,p)); } #ifdef __CUDACC__ free_energy_reduce(&tmp1, threadIdx.x, blockDim.x); free_energy_reduce(&tmp2, threadIdx.x, blockDim.x); if (threadIdx.x != 0) continue; #endif if (is_interior(i,j)) free_energy_accumulate(&wq[upper_triangle_index(i,j)], free_energy_sum(tmp1,tmp2)); const real_t wcoax = free_energy_sum(tmp1 + 2*p->c, tmp2 + 2*p->b + 2*p->c); free_energy_accumulate(&z[ind(i,j,n)], wcoax); free_energy_accumulate(&xl[ind(d%2,i,n)], wcoax); } /* end loop over i */ } /* end calc_z */ GLOBAL static void calc_x (const int d, /* diagonal - length of bases in between i and j, exclusive */ const int n, real_t *__restrict yl, real_t *__restrict y, const real_t *__restrict w, const real_t *__restrict wl, real_t *__restrict xl, real_t *__restrict x, const param_t p) { int i; for (i = ISTART; i < n; i += IINC) { const int jtmp = i+d+1; const int j = wrap(jtmp,n); if (is_exterior(i,j) && i-j <= LOOP_MIN) continue; x[ind(d%5,i,n)] = INF; if (d > 2*LOOP_MIN+1 || is_exterior(i,j)) { if (i != n-1) free_energy_accumulate(&xl[ind(d%2,i,n)], xl[ind((d-1)%2,i+1,n)] + p->b); /* x array */ x[ind(d%5,i,n)] = xl[ind(d%2,i,n)]; if (j != 0) free_energy_accumulate(&x[ind(d%5,i,n)], x[ind((d-1)%5,i,n)] + p->b); } yl[ind(i,j,n)] = free_energy_sum(wl[ind(d%2,i,n)], xl[ind(d%2,i,n)]); y[ind(i,j,n)] = free_energy_sum(w[ind(d%2,i,n)], x[ind(d%5,i,n)]); } /* end loop over i */ } /* end calc_x */ GLOBAL 
static void init_w5_and_w3(int n, real_t *w5, real_t *w3) { w5[-1] = w5[0] = w3[n-1] = w3[n] = 0; } GLOBAL static void calc_w5_and_w3( const int d, const int n, real_t *__restrict w5, real_t *__restrict w3, const real_t *__restrict wq) { #ifdef __CUDACC__ const int istart = threadIdx.x; const int iinc = blockDim.x; #else const int istart = 0; const int iinc = 1; #endif real_t w5tmp = INF, w3tmp = INF; int i; for (i = istart; i + LOOP_MIN <= d; i += iinc) { free_energy_accumulate(&w5tmp, w5[i-1] + wq[upper_triangle_index(i,d+1)]); free_energy_accumulate(&w3tmp, w3[n-i] + wq[upper_triangle_index(n-d-2,n-i-1)]); } #ifdef __CUDACC__ free_energy_reduce(&w5tmp, threadIdx.x, blockDim.x); free_energy_reduce(&w3tmp, threadIdx.x, blockDim.x); if (threadIdx.x != 0) return; #endif w5[d+1] = w5[d]; w3[n-d-2] = w3[n-d-1]; free_energy_accumulate(&w5[d+1], w5tmp); free_energy_accumulate(&w3[n-d-2], w3tmp); } /* end calc_w5_and_w3 */ prna_t prna_new(const char *s, param_t par, int quiet, int *base_cp) { prna_t p = (prna_t) safe_malloc(sizeof(struct prna)); memset(p, 0, sizeof(struct prna)); const int n = p->n = strlen(s); printf("sequence length = %d\n", n); p->seq = (base_t *) safe_malloc(n*sizeof(base_t)); p->base_can_pair = base_cp; sequence_from_string(p->seq, s); p->v = (real_t *) safe_malloc(n*n*sizeof(real_t)); p->w5 = (real_t *) safe_malloc((n+1)*sizeof(real_t)) + 1; p->w3 = (real_t *) safe_malloc((n+1)*sizeof(real_t)); real_t *z, *yl, *y, *wq, *w, *wl, *xl, *x; #ifdef __CUDACC__ /* do multithreaded fill on GPU */ printf("Performing Calculation on GPU\n"); real_t *v, *w5, *w3; #define ALLOC(a,sz) CU(cudaMalloc(&a,(sz)*sizeof(real_t))) ALLOC(v,n*n); ALLOC(w5,n+1); w5++; ALLOC(w3,n+1); ALLOC(z,n*n); ALLOC(yl,n*n); ALLOC(y,n*n); ALLOC(wq,n*(n-1)/2); ALLOC(w,2*n); ALLOC(wl,2*n); ALLOC(xl,2*n); ALLOC(x,5*n); param_t dev_par; CU(cudaMalloc(&dev_par, sizeof(struct param))); CU(cudaMemcpy(dev_par, par, sizeof(struct param), cudaMemcpyHostToDevice)); base_t *dev_s; CU(cudaMalloc(&dev_s,n*sizeof(base_t))); CU(cudaMemcpy(dev_s, p->seq, n*sizeof(base_t), cudaMemcpyHostToDevice)); int *dev_bcp; CU(cudaMalloc(&dev_bcp,(n*(n-1)/2)*sizeof(int))); CU(cudaMemcpy(dev_bcp, p->base_can_pair, (n*(n-1)/2)*sizeof(int), cudaMemcpyHostToDevice)); init_w5_and_w3<<<1,1>>>(n,w5,w3); for (int d = 0; d < n-1; d++) { calc_hairpin_stack_exterior_multibranch<<<n,1>>>(d, n, dev_s, dev_bcp, v, x, w5, w3, dev_par); calc_internal<<<n,dim3(THREAD_X,THREAD_Y,1)>>>(d, n, dev_s, dev_bcp, v, dev_par); calc_coaxial<<<n,NTHREAD>>>(d, n, dev_s, dev_bcp, v, y, w5, w3, dev_par); calc_wl<<<n,1>>>(d, n, dev_s, dev_bcp, v, z, wq, w, wl, dev_par); calc_xl<<<n,NTHREAD>>>(d, n, z, yl, xl); calc_z<<<n,NTHREAD>>>(d, n, dev_s, dev_bcp, v, z, xl, wq, dev_par); calc_x<<<n,1>>>(d, n, yl, y, w, wl, xl, x, dev_par); calc_w5_and_w3<<<1,NTHREAD>>>(d, n, w5, w3, wq); } CU(cudaMemcpy(p->v, v, n*n*sizeof(base_t), cudaMemcpyDeviceToHost)); CU(cudaMemcpy(p->w5 - 1, w5 - 1, (n+1)*sizeof(base_t), cudaMemcpyDeviceToHost)); CU(cudaMemcpy(p->w3, w3, (n+1)*sizeof(base_t), cudaMemcpyDeviceToHost)); CU(cudaFree(v)); CU(cudaFree(w5 - 1)); CU(cudaFree(w3)); CU(cudaFree(z)); CU(cudaFree(yl)); CU(cudaFree(y)); CU(cudaFree(wq)); CU(cudaFree(w)); CU(cudaFree(wl)); CU(cudaFree(xl)); CU(cudaFree(x)); CU(cudaFree(dev_par)); CU(cudaFree(dev_s)); CU(cudaFree(dev_bcp)); #else /* do serial fill on CPU */ #define ALLOC(a,sz) a = (real_t *) safe_malloc((sz)*sizeof(real_t)) printf("Performing Calculations on CPU\n"); ALLOC(z,n*n); ALLOC(yl,n*n); ALLOC(y,n*n); ALLOC(wq,n*(n-1)/2); 
ALLOC(w,2*n); ALLOC(wl,2*n); ALLOC(xl,2*n); ALLOC(x,5*n); init_w5_and_w3(n,p->w5,p->w3); int d; for (d = 0; d < n-1; d++) { calc_hairpin_stack_exterior_multibranch(d, n, p->seq, p->base_can_pair, p->v, x, p->w5, p->w3, par); calc_internal(d, n, p->seq, p->base_can_pair, p->v, par); calc_coaxial(d, n, p->seq, p->base_can_pair, p->v, y, p->w5, p->w3, par); calc_wl(d, n, p->seq, p->base_can_pair, p->v, z, wq, w, wl, par); calc_xl(d, n, z, yl, xl); calc_z(d, n, p->seq, p->base_can_pair, p->v, z, xl, wq, par); calc_x(d, n, yl, y, w, wl, xl, x, par); calc_w5_and_w3(d, n, p->w5, p->w3, wq); } free(z); free(yl); free(y); free(wq); free(w); free(wl); free(xl); free(x); #endif /* __CUDACC__ */ return p; } /* end prna_new */ void prna_delete(prna_t p) { if (p) { if (p->seq) free(p->seq); if (p->v) free(p->v); if (p->w5 - 1) free(p->w5 - 1); if (p->w3) free(p->w3); free(p); } } #define SHOWARR(a) \ if (p->a) { \ int i, j; \ for (i = 0; i < n; i++) { \ printf("%s%4d: ",#a,i+1); \ for (j = 0; j < n; j++) { \ const real_t *aij = array_val(p->a,i,j,n,bcp); \ printf(RF" ", aij ? (*aij)*RT : INF); \ } \ printf("\n"); \ } \ } #define SHOW(a) \ if (p->a) { \ int i; \ printf("%s: ",#a); \ for (i = 0; i < n; i++) \ printf(RF" ", p->a[i] * RT); \ printf("\n"); \ } \ void prna_show(const prna_t p) { int i, n = p->n; const base_t *s = p->seq; const int *bcp = p->base_can_pair; printf("n: %d\n", n); printf("seq: "); for (i = 0; i < n; i++) printf("%c", base_as_char(s[i])); printf("\n"); SHOWARR(v); SHOW(w5); SHOW(w3); } static real_t free_energy_of_pair(const prna_t p, int i, int j) { const int n = p->n; //const base_t *s = p->seq; const int *bcp = p->base_can_pair; if (can_pair(i,j,n,bcp)){ return *array_val(p->v,i,j,n,bcp) + *array_val(p->v,j,i,n,bcp) - p->w3[0]; } else return INF; } real_t probability_of_pair(const prna_t p, int i, int j) { return exp(-free_energy_of_pair(p,i,j)); } real_t get_v_array(const prna_t p, int i, int j) { const int n = p->n; const int *bcp = p->base_can_pair; if (can_pair(i,j,n,bcp)){ return *array_val(p->v,i,j,n,bcp); } else return -INF; } real_t get_w3_array(const prna_t p, int i) { return p->w3[i]; } real_t get_w5_array(const prna_t p, int i) { return p->w5[i]; } void prna_write_neg_log10_probabilities(const prna_t p, const char *fn) { FILE *f = safe_fopen(fn,"w"); int i, j; fprintf(f,"%d\n%-8s%-8s-log10(probability)\n",p->n,"i","j"); for (i = 0; i < p->n; i++) for (j = i+1; j < p->n; j++) if (can_pair(i,j,p->n,p->base_can_pair)) fprintf(f,"%-8d%-8d" RF "\n", i+1, j+1, free_energy_of_pair(p,i,j)/LOG(10)); fclose(f); } void prna_write_probability_matrix(const prna_t p, const char *fn) { FILE *f = safe_fopen(fn,"w"); const int n = p->n; //const base_t *s = p->seq; const int *bcp = p->base_can_pair; int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) fprintf(f,RF" ", can_pair(i,j,n,bcp) ? probability_of_pair(p,i,j) : 0); fprintf(f,"\n"); } fclose(f); } static void write_ct_structure(FILE *f, const char *s, int n, const int *pair) { char fmt[256]; sprintf(fmt,"%d",n); int ns = strlen(fmt)+1; if (ns < 5) ns = 5; sprintf(fmt,"%%%dd",ns); int i; for (i = 0; i < n; i++) { fprintf(f,fmt,i+1); fprintf(f,"%2c ",s[i]); fprintf(f,fmt,i); fprintf(f,fmt,i == n-1 ? 0 : i+2); fprintf(f,fmt,pair[i] == i ? 
0 : pair[i]+1); fprintf(f,fmt,i+1); fprintf(f,"\n"); } } static void unpair(int *pair, int i) { const int j = pair[i]; pair[i] = i; pair[j] = j; } static int is_paired(const int *pair, int i) { return pair[i] != i; } static void remove_helices_shorter_than(int min_helix_length, int *pair, int n) { int i; for (i = 0; i < n-2; i++) { int j = pair[i]; if (j <= i) continue; int npair = 1; while (pair[i+1] == j-1 || pair[i+2] == j-1 || pair[i+1] == j-2) { if (pair[i+1] == j-1) ; else if (pair[i+2] == j-1) { if (is_paired(pair,i+1)) unpair(pair,i+1); i++; } else j--; i++; j--; npair++; } if (npair < min_helix_length) { unpair(pair,i); if (i >= 2) { while (pair[i-1] == j+1 || pair[i-2] == j+1 || pair[i-1] == j+2) { if (pair[i-1] == j+1) unpair(pair,i-1); else if (pair[i-2] == j+1) { unpair(pair,i-2); i--; } else { unpair(pair,i-1); j++; } i--; j++; } } else if (i == 1) { while (pair[i-1] == j+1 || pair[i-1] == j+2) { if (pair[i-1] == j+1) unpair(pair,i-1); else { unpair(pair,i-1); j++; } i--; j++; } } } } } /* end remove_helices_shorter_than */ void prna_write_probknot(const prna_t p, const char *fn, const char *s, int min_helix_length) { const int n = p->n; int *pair = (int *) safe_malloc(n*sizeof(int)); int i; for (i = 0; i < n; i++) { pair[i] = i; /* unpaired */ int j; for (j = 0; j < n; j++) if (free_energy_of_pair(p,i,j) < free_energy_of_pair(p,i,pair[i])) pair[i] = j; } for (i = 0; i < n; i++) if (pair[pair[i]] != i) pair[i] = i; /* unpaired */ if (min_helix_length > 1) remove_helices_shorter_than(min_helix_length,pair,n); /* write the structure */ if (fn) { FILE *f = safe_fopen(fn,"w"); write_ct_structure(f,s,n,pair); fclose(f); } else { write_ct_structure(stdout,s,n,pair); } free(pair); } int *generate_bcp(const char *s) { int length = strlen(s); int i, j; int *base_cp = (int *) safe_malloc((length*(length-1)/2)*sizeof(int)); base_t *seq = (base_t *) safe_malloc(length*sizeof(base_t)); sequence_from_string(seq, s); for (i=0; i<length; i++){ for (j=i+1; j<length; j++){ if ((j-i < LOOP_MIN+1) || !isupper(s[i]) || !isupper(s[j])){ base_cp[(j*(j-1))/2 + i]=0; } else{ base_cp[upper_triangle_index(i,j)]=is_canonical_pair(seq[i],seq[j]) && ((i > 0 && j < length-1 && is_canonical_pair(seq[i-1],seq[j+1])) || (j-i>=LOOP_MIN+3 && is_canonical_pair(seq[i+1],seq[j-1]))); } } } return base_cp; }
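free_energy_reduce in the partition-function code above is a standard shared-memory tree reduction: each thread of the block writes its partial result into a __shared__ buffer of NTHREAD entries, the number of active threads is halved every round with a __syncthreads() barrier between rounds, and thread 0 is left holding the combined value. The scheme relies on the block size being a power of two (NTHREAD is 128 here, split as 8x16 for calc_internal). A minimal standalone sketch of the same pattern follows, assuming a power-of-two block size and using fminf as a stand-in for free_energy_accumulate, which is defined outside this excerpt; block_min, BLOCK, and the test data are illustrative names, not part of the source.

#include <cstdio>
#include <cmath>
#include <cfloat>

#define BLOCK 128  // power of two, like NTHREAD in the source

__global__ void block_min(const float *in, float *out, int n) {
  __shared__ float buf[BLOCK];
  const int tid = threadIdx.x;
  const int gid = blockIdx.x * BLOCK + tid;
  buf[tid] = (gid < n) ? in[gid] : FLT_MAX;    // FLT_MAX is the identity for min
  __syncthreads();
  for (int nt = BLOCK / 2; nt > 0; nt /= 2) {  // halve the active threads each round
    if (tid < nt) buf[tid] = fminf(buf[tid], buf[tid + nt]);
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = buf[0];      // one partial minimum per block
}

int main() {
  const int n = 1000;
  const int blocks = (n + BLOCK - 1) / BLOCK;
  float h_in[n], h_out[blocks];
  for (int i = 0; i < n; ++i) h_in[i] = (float)((i * 37) % 101);
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, blocks * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  block_min<<<blocks, BLOCK>>>(d_in, d_out, n);
  cudaMemcpy(h_out, d_out, blocks * sizeof(float), cudaMemcpyDeviceToHost);
  float m = FLT_MAX;
  for (int i = 0; i < blocks; ++i) m = fminf(m, h_out[i]);
  printf("min = %f\n", m);                     // 0 for this test pattern
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

The version in the source differs only in that the combined value is written back through the pointer argument, after which the callers skip the remaining per-row work on every thread except tid 0.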
4ceb1a170b327384f25cc939411cdec727178e2b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vectorLength.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; const double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const double *y = NULL; hipMalloc(&y, XSIZE*YSIZE); double *len = NULL; hipMalloc(&len, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vectorLength), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vectorLength), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vectorLength), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4ceb1a170b327384f25cc939411cdec727178e2b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vectorLength.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; const double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const double *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); double *len = NULL; cudaMalloc(&len, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vectorLength<<<gridBlock,threadBlock>>>(size,x,y,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vectorLength<<<gridBlock,threadBlock>>>(size,x,y,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vectorLength<<<gridBlock,threadBlock>>>(size,x,y,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
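Placed side by side, this CUDA original and the hipified file above it show the mechanical renaming that hipify applies throughout this dataset: cuda.h becomes hip/hip_runtime.h, curand_kernel.h becomes hiprand/hiprand_kernel.h, cudaSetDevice/cudaMalloc/cudaFree/cudaDeviceSynchronize become their hip-prefixed counterparts, and every triple-chevron kernel launch is rewritten as a hipLaunchKernelGGL call. The fragment below restates that mapping around a hypothetical scaleKernel; only the renamings in the comments come from the pair above, the kernel itself is not from the dataset.

// Compiles as CUDA; each comment gives the HIP spelling hipify would substitute.
// scaleKernel is a hypothetical example, not a kernel from the dataset.
#include <cstdio>

__global__ void scaleKernel(double *data, int n, double s) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

int main() {
  const int n = 1 << 20;
  double *data;
  cudaSetDevice(0);                              // HIP: hipSetDevice(0)
  cudaMalloc(&data, n * sizeof(double));         // HIP: hipMalloc(&data, n * sizeof(double))
  dim3 block(256), grid((n + 255) / 256);
  scaleKernel<<<grid, block>>>(data, n, 2.0);    // HIP: hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, data, n, 2.0)
  cudaDeviceSynchronize();                       // HIP: hipDeviceSynchronize()
  cudaFree(data);                                // HIP: hipFree(data)
  printf("scaled %d elements\n", n);             // contents left uninitialized; the launch pattern is the point
  return 0;
}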
fdb840064679ce5e55db289e44fc5d532f2d4ad8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "floattoint.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *out = NULL; hipMalloc(&out, XSIZE*YSIZE); float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( floattoint), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( floattoint), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( floattoint), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fdb840064679ce5e55db289e44fc5d532f2d4ad8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "floattoint.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); floattoint<<<gridBlock,threadBlock>>>(out,in); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { floattoint<<<gridBlock,threadBlock>>>(out,in); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { floattoint<<<gridBlock,threadBlock>>>(out,in); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
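The two benchmark harnesses above (vectorLength and floattoint), and the TopForcing one that follows, are auto-generated from the same template: pick a matrix size and a block shape, pad iXSIZE/iYSIZE up to a multiple of BLOCKX/BLOCKY with a while loop, launch once and synchronize, warm up for 10 launches, then time 1000 launches with std::chrono. The sketch below restates that pattern under stated assumptions: dummyKernel and its sizes are placeholders, ceiling division replaces the while-loop padding, and a synchronization is added before the clock is read so the measurement covers execution rather than just launch enqueueing. As an aside, the template passes XSIZE*YSIZE as the byte count to cudaMalloc/hipMalloc, which appears to leave out the element size; the sketch sizes its buffer in bytes explicitly.

#include <cstdio>
#include <chrono>

// Placeholder kernel standing in for the kernels timed by the harnesses.
__global__ void dummyKernel(double *buf, int nx, int ny) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < nx && j < ny) buf[j * nx + i] *= 2.0;
}

int main() {
  const int NX = 1016, NY = 1016, BLOCKX = 16, BLOCKY = 16;
  double *buf;
  cudaMalloc(&buf, (size_t)NX * NY * sizeof(double));  // byte count includes sizeof(double)
  cudaMemset(buf, 0, (size_t)NX * NY * sizeof(double));
  // Ceiling division replaces the while-loop padding used by the harness.
  dim3 grid((NX + BLOCKX - 1) / BLOCKX, (NY + BLOCKY - 1) / BLOCKY);
  dim3 block(BLOCKX, BLOCKY);
  for (int w = 0; w < 10; ++w)                         // warm-up launches, as in the harness
    dummyKernel<<<grid, block>>>(buf, NX, NY);
  cudaDeviceSynchronize();
  const auto start = std::chrono::steady_clock::now();
  for (int it = 0; it < 1000; ++it)
    dummyKernel<<<grid, block>>>(buf, NX, NY);
  cudaDeviceSynchronize();                             // wait for the work itself, not just the launches
  const auto end = std::chrono::steady_clock::now();
  const float usecs = std::chrono::duration<float, std::micro>(end - start).count();
  printf("1000 launches: %f us, grid (%d,%d), block (%d,%d)\n",
         usecs, (int)grid.x, (int)grid.y, BLOCKX, BLOCKY);
  cudaFree(buf);
  return 0;
}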
60413dbe61c0ae4416c83840cce93e0d5e680d4c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "TopForcing.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double ppt = 1; double *eff_rain = NULL; hipMalloc(&eff_rain, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( TopForcing), dim3(gridBlock),dim3(threadBlock), 0, 0, ppt,eff_rain,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( TopForcing), dim3(gridBlock),dim3(threadBlock), 0, 0, ppt,eff_rain,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( TopForcing), dim3(gridBlock),dim3(threadBlock), 0, 0, ppt,eff_rain,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60413dbe61c0ae4416c83840cce93e0d5e680d4c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "TopForcing.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double ppt = 1; double *eff_rain = NULL; cudaMalloc(&eff_rain, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); TopForcing<<<gridBlock,threadBlock>>>(ppt,eff_rain,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { TopForcing<<<gridBlock,threadBlock>>>(ppt,eff_rain,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { TopForcing<<<gridBlock,threadBlock>>>(ppt,eff_rain,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
Optimizer.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Optimizer.cuh"

__global__ void gradient_descent_kernal(const val_t* deltas, val_t* weigts, size_t weightsSize, val_t alpha, val_t lambda) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < weightsSize; i += blockDim.x * gridDim.x) {
        weigts[i] = weigts[i] - alpha * (deltas[i] + lambda * weigts[i]);
    }
}

void gradient_descent::update(const val_t* deltas, val_t* weights, size_t weightsSize, hipStream_t stream) {
    size_t threads = DEFAULT_THREAD_SIZE;
    size_t blocks = ::ceil(weightsSize / (float)threads);
    gradient_descent_kernal<<<blocks, threads, 0, stream>>>(deltas, weights, weightsSize, alpha, lambda);
}
Optimizer.cu
#include "Optimizer.cuh" __global__ void gradient_descent_kernal(const val_t* deltas, val_t* weigts, size_t weightsSize, val_t alpha, val_t lambda) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < weightsSize; i += blockDim.x * gridDim.x) { weigts[i] = weigts[i] - alpha * (deltas[i] + lambda * weigts[i]); } } void gradient_descent::update(const val_t* deltas, val_t* weights, size_t weightsSize, cudaStream_t stream) { size_t threads = DEFAULT_THREAD_SIZE; size_t blocks = std::ceil(weightsSize / (float)threads); gradient_descent_kernal << <blocks, threads, 0, stream >> > (deltas, weights, weightsSize, alpha, lambda); }
43a2315bd1a1c6db53f33cf5ffe70cf272b22ff8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_minus_2_b; int xdim0_update_halo_kernel4_minus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel4_minus_2_b; int ydim0_update_halo_kernel4_minus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel4_minus_2_b; int xdim1_update_halo_kernel4_minus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel4_minus_2_b; int ydim1_update_halo_kernel4_minus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_minus_2_b*(y)+xdim0_update_halo_kernel4_minus_2_b*ydim0_update_halo_kernel4_minus_2_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_minus_2_b*(y)+xdim1_update_halo_kernel4_minus_2_b*ydim1_update_halo_kernel4_minus_2_b*(z)) //user function __device__ inline void update_halo_kernel4_minus_2_b_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = -(vol_flux_y[OPS_ACC0(0,-2,0)]); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = -(mass_flux_y[OPS_ACC1(0,-2,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_minus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_minus_2_b + idx_z * 1*1 * xdim0_update_halo_kernel4_minus_2_b * ydim0_update_halo_kernel4_minus_2_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_minus_2_b + idx_z * 1*1 * xdim1_update_halo_kernel4_minus_2_b * ydim1_update_halo_kernel4_minus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_minus_2_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_minus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,74)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(74,"update_halo_kernel4_minus_2_b"); OPS_kernels[74].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = 
MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_minus_2_b_h || ydim0 != ydim0_update_halo_kernel4_minus_2_b_h || xdim1 != xdim1_update_halo_kernel4_minus_2_b_h || ydim1 != ydim1_update_halo_kernel4_minus_2_b_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel4_minus_2_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_minus_2_b_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel4_minus_2_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_minus_2_b_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel4_minus_2_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_minus_2_b_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel4_minus_2_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_minus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[74].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_minus_2_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[74].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[74].mpi_time += t2-t1; OPS_kernels[74].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[74].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 74; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 74; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_minus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(74,"update_halo_kernel4_minus_2_b"); } ops_enqueue_kernel(desc); } #endif
43a2315bd1a1c6db53f33cf5ffe70cf272b22ff8.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_minus_2_b; int xdim0_update_halo_kernel4_minus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel4_minus_2_b; int ydim0_update_halo_kernel4_minus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel4_minus_2_b; int xdim1_update_halo_kernel4_minus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel4_minus_2_b; int ydim1_update_halo_kernel4_minus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_minus_2_b*(y)+xdim0_update_halo_kernel4_minus_2_b*ydim0_update_halo_kernel4_minus_2_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_minus_2_b*(y)+xdim1_update_halo_kernel4_minus_2_b*ydim1_update_halo_kernel4_minus_2_b*(z)) //user function __device__ inline void update_halo_kernel4_minus_2_b_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = -(vol_flux_y[OPS_ACC0(0,-2,0)]); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = -(mass_flux_y[OPS_ACC1(0,-2,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_minus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_minus_2_b + idx_z * 1*1 * xdim0_update_halo_kernel4_minus_2_b * ydim0_update_halo_kernel4_minus_2_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_minus_2_b + idx_z * 1*1 * xdim1_update_halo_kernel4_minus_2_b * ydim1_update_halo_kernel4_minus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_minus_2_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_minus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,74)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(74,"update_halo_kernel4_minus_2_b"); OPS_kernels[74].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int 
ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_minus_2_b_h || ydim0 != ydim0_update_halo_kernel4_minus_2_b_h || xdim1 != xdim1_update_halo_kernel4_minus_2_b_h || ydim1 != ydim1_update_halo_kernel4_minus_2_b_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel4_minus_2_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_minus_2_b_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel4_minus_2_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_minus_2_b_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel4_minus_2_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_minus_2_b_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel4_minus_2_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_minus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[74].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_minus_2_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[74].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[74].mpi_time += t2-t1; OPS_kernels[74].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[74].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 74; desc->hash = 5381; 
desc->hash = ((desc->hash << 5) + desc->hash) + 74; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_minus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(74,"update_halo_kernel4_minus_2_b"); } ops_enqueue_kernel(desc); } #endif
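One detail worth noting in the OPS-generated pair above: the array dimensions consumed by the OPS_ACC0/OPS_ACC1 macros live in __constant__ memory, and the host keeps shadow copies (the *_h variables, initialized to -1) so that cudaMemcpyToSymbol/hipMemcpyToSymbol is issued only when a dimension actually changes between launches. The sketch below isolates just that caching idiom; xdim_example, use_xdim, and set_xdim_if_changed are illustrative names, not symbols from the OPS code.

#include <cstdio>

__constant__ int xdim_example;   // read by kernels, analogous to xdim0_update_halo_kernel4_minus_2_b
static int xdim_example_h = -1;  // host-side cache of the last uploaded value

__global__ void use_xdim(int *out) {
  if (threadIdx.x == 0 && blockIdx.x == 0) out[0] = xdim_example;
}

static void set_xdim_if_changed(int xdim) {
  // Upload to constant memory only when the cached value is stale.
  if (xdim != xdim_example_h) {
    cudaMemcpyToSymbol(xdim_example, &xdim, sizeof(int));
    xdim_example_h = xdim;
  }
}

int main() {
  int *d_out, h_out = 0;
  cudaMalloc(&d_out, sizeof(int));
  set_xdim_if_changed(128);   // first call uploads
  set_xdim_if_changed(128);   // second call is skipped by the cache
  use_xdim<<<1, 32>>>(d_out);
  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("xdim on device = %d\n", h_out);
  cudaFree(d_out);
  return 0;
}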
626acd5cb3c9681fce747fb5f56ae251fe57c83c.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) NVIDIA Corporation and Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Limitations of current Longformer Attention CUDA Kernels: // (1) Does not support global tokens in the middle. All global tokens shall be in the beginning of sequence. // (2) Maximum number of global tokens <= one-sided attention window #include <hipcub/hipcub.hpp> #include <rocblas.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <math_constants.h> #include <hip/library_types.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cuda/bert/add_bias_transpose.h" #include "contrib_ops/cuda/bert/attention_impl.h" #include "contrib_ops/cuda/bert/longformer_attention_softmax.h" #include "contrib_ops/cuda/bert/longformer_attention_impl.h" using namespace onnxruntime::cuda; using namespace cub; #define CHECK(expr) CUBLAS_RETURN_IF_ERROR(expr) #define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr) namespace onnxruntime { namespace contrib { namespace cuda { // Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), maximum global tokens (G) // // Workspace layout (default data type T is float or half): // [SoftmaxSpace] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxSxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH] // where Global_Q, Global_K and Global_V are optional. They are not allocated when there is no global token. // // SoftmaxSpace layout is the following when compact memory is enabled: // [scratch1: (5S-3W)*W*N*B] [scratch2: size_t 15] // Scratch1 has 5 buffers for local and global attention calculation. // Scratch2 has 5 input/output pointers, 5 buffer sizes and 5 strides related to scratch1. // // SoftmaxSpace layout is the following When compact memory is disabled: // [scratch1: BxNxSxS] [scratch2: BxNxSxS] static size_t Align(size_t a) { const size_t alignment = 128; // Align on a 16-byte boundary to avoid "misaligned address" error. 
return CeilDiv(a, alignment) * alignment; } size_t GetScratch1Size(size_t element_size, size_t batch_size, size_t num_heads, size_t sequence_length, size_t window) { size_t bytes = (5 * sequence_length - 3 * window) * window * num_heads * batch_size * element_size; return Align(bytes); } constexpr size_t GetScratch2Size() { return 5 * sizeof(void*) + 10 * sizeof(size_t); } size_t GetLongformerSoftmaxWorkspaceSize( size_t element_size, size_t batch_size, size_t num_heads, size_t sequence_length, size_t window, bool disable_compact_memory) { if (!disable_compact_memory) { size_t scratch1_size = GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window); size_t scratch2_size = GetScratch2Size(); return Align(scratch1_size + scratch2_size); } else { return 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length); } } size_t GetLongformerAttentionWorkspaceSize( size_t element_size, size_t batch_size, size_t num_heads, size_t head_size, size_t sequence_length, size_t max_num_global, size_t window, bool disable_compact_memory) { size_t softmax_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length, window, disable_compact_memory); size_t qkv_size = static_cast<size_t>(3) * batch_size * sequence_length * num_heads * head_size * element_size; size_t global_qkv_size = max_num_global > 0 ? qkv_size : 0; return softmax_size + qkv_size + global_qkv_size; } // Size of buffer of pinned memory in CPU. The buffer is used to copy memory between CPU and GPU. // The buffer includes two parts: [global_count (copy of batch_global_num): int Bx1] [copy of scratch2] size_t GetPinnedBufferSize(size_t batch_size) { return sizeof(int) * batch_size + GetScratch2Size(); } // Softmax kernel for compact format template <typename T, int blockSize> __launch_bounds__(blockSize) __global__ void LongformerSoftmaxKernel(const int* global_attention, const int* global_index, const int* batch_global_num, void* buffer_pointers, const T* attention_mask, float scaler, int sequence_length, int num_heads, int window) { typedef hipcub::BlockReduce<float, blockSize> BlockReduce; __shared__ typename BlockReduce::TempStorage block_reduce_temp; int tid = threadIdx.x; const int batch_index = blockIdx.x / (sequence_length * num_heads); const int row_index = blockIdx.x % sequence_length; const int head_index = (blockIdx.x / sequence_length) % num_heads; // Adjust the pointers for the batch const T* mask_block = attention_mask + sequence_length * batch_index; const int* global_index_block = global_index + sequence_length * batch_index; const int global_num = batch_global_num[batch_index]; size_t* p_inputs = reinterpret_cast<size_t*>(buffer_pointers); size_t* p_outputs = reinterpret_cast<size_t*>(buffer_pointers); size_t* input_sizes = reinterpret_cast<size_t*>(buffer_pointers) + 5; size_t* input_strides = reinterpret_cast<size_t*>(buffer_pointers) + 10; const T* inputs[5]; T* outputs[5]; for (int i = 0; i < 5; ++i) { inputs[i] = reinterpret_cast<T*>(p_inputs[i]) + batch_index * num_heads * input_sizes[i]; outputs[i] = reinterpret_cast<T*>(p_outputs[i]) + batch_index * num_heads * input_sizes[i]; } // Local attention token int col_start = 0; int col_end = sequence_length; bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == static_cast<int>(0)); if (is_local_row) { col_start = row_index - window; if (col_start < 0) { col_start = 0; } col_end = row_index + window + 1; if (col_end > sequence_length) { col_end = 
sequence_length; } } // If mask is set then set everything to zero to match huggingface transformers implementation if ((float)mask_block[row_index] != 0.f) { if (is_local_row) { T* output_block = nullptr; T* output_global = nullptr; int local_offset = row_index % window; int local_start = 0; int local_end = 3 * window; if (row_index < window) { local_start = 0; local_end = 2 * window; output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < sequence_length - window) { output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { local_start = 0; local_end = 2 * window; output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; } for (int i = local_start + tid; i < local_end; i += blockSize) { output_block[i] = 0; } if ((row_index - 2 * window) >= 0) { output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } if (output_global != nullptr) { for (int i = tid; i < global_num; i += blockSize) { output_global[i] = 0; } } } else { T* output_block = outputs[4]; for (int i = tid; i < sequence_length; i += blockSize) output_block[i] = 0; } return; } float sum_input = 0.; __shared__ float sum_shared; // Calculate max input float max_input = -CUDART_INF_F; __shared__ float max_shared; if (is_local_row) { const T* input_block = nullptr; T* output_block = nullptr; T* output_global = nullptr; int local_offset = row_index % window; int local_start = local_offset; int local_end = local_start + 2 * window + 1; int zero_start = 0; int zero_end = 3 * window; if (row_index < window) { local_start = 0; local_end = local_offset + window + 1; zero_end = 2 * window; input_block = inputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < sequence_length - window) { input_block = inputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { local_start = local_offset; local_end = 2 * window; zero_end = 2 * window; input_block = inputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; } const T* input_global = nullptr; int local_global = row_index - window; if (local_global > global_num) { local_global = global_num; } if (local_global > 0) { input_global = inputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } if (row_index < window) { output_global = (T*)outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < 2 * window) { output_global = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = x * scaler + (float)mask_block[j]; if (max_input < x) max_input = x; } if (input_global != nullptr) { for (int i = tid; i < local_global; i += blockSize) { float x = input_global[global_index_block[i]]; x = x * scaler + (float)mask_block[global_index_block[i]]; if (max_input < x) max_input = x; } } float max_block = 
BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max()); if (tid == 0) { max_shared = max_block; } __syncthreads(); for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); sum_input += x; } if (input_global != nullptr) { for (int i = tid, j = col_start + tid; i < local_global; i += blockSize, j += blockSize) { float x = input_global[global_index_block[i]]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); sum_input += x; } } float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum()); if (tid == 0) { sum_shared = sum_block; } __syncthreads(); float recip_sum = 1.f / sum_shared; for (int i = tid + zero_start; i < local_start; i += blockSize) { output_block[i] = (T)(0.); } for (int i = tid + local_end; i < zero_end; i += blockSize) { output_block[i] = (T)(0.); } __syncthreads(); for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); output_block[i] = (T)(recip_sum * x); } if (input_global != nullptr) { for (int i = tid; i < local_global; i += blockSize) { float x = input_global[global_index_block[i]]; x = expf((x)*scaler + (float)mask_block[global_index_block[i]] - max_shared); output_global[i] = (T)(recip_sum * x); } } } else { // Global tokens const T* input_block = inputs[4] + row_index * input_strides[4] + head_index * input_sizes[4]; T* output_block = outputs[4] + row_index * input_strides[4] + head_index * input_sizes[4]; for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = x * scaler + (float)mask_block[i]; if (max_input < x) max_input = x; } float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max()); if (tid == 0) { max_shared = max_block; } __syncthreads(); for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[i] - max_shared); sum_input += x; } float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum()); if (tid == 0) { sum_shared = sum_block; } __syncthreads(); float recip_sum = 1.f / sum_shared; for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[i] - max_shared); output_block[i] = (T)(recip_sum * x); } } } Status LaunchLongformerSoftmaxKernel( hipStream_t stream, hipblasHandle_t cublas, void* workspace, const void* q, // transposed Q with shape (B, N, S, H) const void* k, // transposed K with shape (B, N, S, H) const void* v, // transposed V with shape (B, N, S, H) const void* attention_mask, // attention mask with shape (B, S), with value 0 not masked and -10000 masked. int max_num_global, // maximum number of global tokens (G) const bool compact_global_q, // whether global_q has shape (B, N, G, H) instead of (B, N, S, H) const void* global_q, // Q for global tokens with shape (B, N, S, H). const void* global_k, // K for global tokens with shape (B, N, S, H) const void* global_v, // V for global tokens with shape (B, N, S, H) const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global. 
const int* global_index, // Global index with shape (B, S) const int* batch_global_num, // Number of global tokens per batch with shape (B, 1) void* pinned_buffer, // Pinned memory in CPU with 2 parts: global tokens per batch, and data for scratch2 void* output, // output with shape (B, N, S, H) float scaler, // scalar int batch_size, // batch size int sequence_length, // sequence length int num_heads, // number of heads int head_size, // hidden size per head int window, // one sided window size size_t element_size) { // size of element: 2 for half, and 4 for float const int* global_count = reinterpret_cast<const int*>(pinned_buffer); bool is_fp16 = (element_size == 2); char* scratch1 = reinterpret_cast<char*>(workspace); char* scratch2 = scratch1 + GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window); // Setup shared parameters for two strided batched matrix multiplies hipDataType Atype; hipDataType Btype; hipDataType Ctype; hipDataType resultType; hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT; __half one_fp16, zero_fp16; float one_fp32, zero_fp32; void *alpha, *beta_0, *beta_1; if (is_fp16) { one_fp16 = __float2half(1.f); zero_fp16 = __float2half(0.f); alpha = static_cast<void*>(&one_fp16); beta_0 = static_cast<void*>(&zero_fp16); beta_1 = static_cast<void*>(&one_fp16); Atype = HIP_R_16F; Btype = HIP_R_16F; Ctype = HIP_R_16F; resultType = HIP_R_16F; algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; } else { one_fp32 = 1.f; zero_fp32 = 0.f; alpha = static_cast<void*>(&one_fp32); beta_0 = static_cast<void*>(&zero_fp32); beta_1 = static_cast<void*>(&one_fp32); Atype = HIP_R_32F; Btype = HIP_R_32F; Ctype = HIP_R_32F; resultType = HIP_R_32F; } // Strided batch matrix multiply // qk = q * k^T // Shapes: q and k = B x N x S x H, qk = B x N x S x S // Convert col-major to row-major by swapping q and k in Gemm size_t elements_per_batch = num_heads * sequence_length * head_size; int stride_per_head = sequence_length * head_size; // stride for Q, K, V and output // Local attention part // S x S is calculated using sliding block WxW (W is one sided window size) like the following: // [W][W] // [W][W][W] // [W][W][W] // [W][W] // The first and last rows have 2 blocks per row, and the remaining has 3 blocks per row. // The calculation are splited into 3 parts: the first row, middle rows and finally the last row. // To save space, we do not store the whole matrix. Instead, we only allocate space for these blocks. 
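// Worked example of the saving from storing only these blocks (illustrative numbers, not
// from the original file): with S = 4096 and a one-sided window W = 256, the compact
// layout keeps (5*4096 - 3*256) * 256 = 5,046,272 elements per head, versus the
// 16,777,216 elements a dense S x S score matrix would need -- roughly 3.3x smaller.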
// // For global attention part, we have two assumptions: // (1) Global tokens are at the beginging of sequence // (2) Number of global tokens <= attention window // // The results are stored in scratch1 buffer: // Number of elements for local attention are (3*S/W-2)*W*W*N*B, or (3S-2W)*W*N*B // Number of elements for local attends to global are (S-W)*W*N*B // Number of elements for global attends to everything are S*W*N*B // Total elements (FP16 or FP32) are (5S-3W)*W*N*B const int w = window; const int middle_count = (sequence_length - 2 * w) / w; int last_block = (sequence_length / w) - 1; // Determine the non-zero block dimensions and pointers // Buffer size per head for a single batch size_t buffer_sizes[5] = { static_cast<size_t>(w * w * 2), // first row of blocks has 2 WxW blocks static_cast<size_t>(w * w * middle_count * 3), // middle rows of blocks have 3 WxW blocks per row static_cast<size_t>(w * w * 2), // last row of blocks has 2 WxW blocks static_cast<size_t>(w * (sequence_length - w)), // local attends to global: global tokens <= window size static_cast<size_t>(w * sequence_length)}; // global attends to everything. size_t buffer_strides[5] = { static_cast<size_t>(w * 2), static_cast<size_t>(w * 3), static_cast<size_t>(w * 2), static_cast<size_t>(w), // number of global tokens <= window size static_cast<size_t>(sequence_length)}; void* buffer_pointers[5]; char* current_pointer = scratch1; for (int i = 0; i < 5; ++i) { buffer_pointers[i] = reinterpret_cast<void*>(current_pointer); current_pointer += buffer_sizes[i] * num_heads * batch_size * element_size; } // Copy to a continues buffer first so that we only need call hipMemcpyAsync once char* temp_buffer = reinterpret_cast<char*>(pinned_buffer) + sizeof(int) * batch_size; memcpy(temp_buffer, &buffer_pointers[0], 5 * sizeof(void*)); memcpy(temp_buffer + 5 * sizeof(void*), &buffer_sizes[0], 5 * sizeof(size_t)); memcpy(temp_buffer + 5 * sizeof(void*) + 5 * sizeof(size_t), &buffer_strides[0], 5 * sizeof(size_t)); CHECK_CUDA(hipMemcpyAsync(scratch2, temp_buffer, GetScratch2Size(), hipMemcpyHostToDevice, stream)); // Local attention part { // local attention per head - head CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, 2 * w, // m w, // n head_size, // k alpha, // alpha k, // A Atype, // A type head_size, // lda stride_per_head, // strideA q, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta buffer_pointers[0], // C Ctype, // C type 2 * w, // ldc buffer_sizes[0], // strideC batch_size * num_heads, // batch count resultType, algo)); // local attention per head - middle if (middle_count > 0) { for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < num_heads; ++j) { const void* q_head = reinterpret_cast<const char*>(q) + (i * elements_per_batch + (j * sequence_length + w) * head_size) * element_size; const void* k_head = reinterpret_cast<const char*>(k) + (i * elements_per_batch + j * sequence_length * head_size) * element_size; void* qk_head = reinterpret_cast<char*>(buffer_pointers[1]) + static_cast<size_t>(i * num_heads + j) * buffer_sizes[1] * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, 3 * w, // m w, // n head_size, // k alpha, // alpha k_head, // A Atype, // A type head_size, // lda w * head_size, // strideA q_head, // B Btype, // B type head_size, // ldb w * head_size, // strideB beta_0, // beta qk_head, // C Ctype, // C type 3 * w, // ldc 3 * w * w, // strideC middle_count, // batch count resultType, algo)); } } } // local 
attention per head - tail const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size; const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, 2 * w, // m w, // n head_size, // k alpha, // alpha k_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA q_head, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta buffer_pointers[2], // C Ctype, // C type 2 * w, // ldc buffer_sizes[2], // strideC batch_size * num_heads, // batch count resultType, algo)); } // Global attention part for (int i = 0; i < batch_size; ++i) { if (global_count[i] > 0) { const void* q_batch = reinterpret_cast<const char*>(q) + (i * elements_per_batch + w * head_size) * element_size; const void* k_batch = reinterpret_cast<const char*>(k) + (i * elements_per_batch) * element_size; void* qk_batch = reinterpret_cast<char*>(buffer_pointers[3]) + (i * buffer_sizes[3]) * num_heads * element_size; // Local tokens attending global tokens CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, global_count[i], // m sequence_length - w, // n head_size, // k alpha, // alpha k_batch, // A Atype, // A type head_size, // lda stride_per_head, // strideA q_batch, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta qk_batch, // C Ctype, // C type w, // ldc buffer_sizes[3], // strideC num_heads, // batch count resultType, algo)); const size_t global_q_per_batch = compact_global_q ? num_heads * max_num_global * head_size : elements_per_batch; const int global_q_stride = (compact_global_q ? max_num_global * head_size : stride_per_head); const void* global_q_batch = reinterpret_cast<const char*>(global_q) + (i * global_q_per_batch) * element_size; const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * elements_per_batch) * element_size; qk_batch = reinterpret_cast<char*>(buffer_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size; // Global tokens attending everything // This GEMMs need to be last to make sure all global token entries are re-written. CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, sequence_length, // m global_count[i], // n head_size, // k alpha, // alpha global_k_batch, // A Atype, // A type head_size, // lda stride_per_head, // strideA global_q_batch, // B Btype, // B type head_size, // ldb global_q_stride, // strideB. beta_0, // beta qk_batch, // C Ctype, // C type sequence_length, // ldc buffer_sizes[4], // strideC num_heads, // batch count resultType, algo)); } } const int blockSize = 64; const int gridSize = batch_size * num_heads * sequence_length; if (is_fp16) { hipLaunchKernelGGL(( LongformerSoftmaxKernel<__half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, global_attention, global_index, batch_global_num, scratch2, static_cast<const __half*>(attention_mask), scaler, sequence_length, num_heads, window); } else { hipLaunchKernelGGL(( LongformerSoftmaxKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, global_attention, global_index, batch_global_num, scratch2, static_cast<const float*>(attention_mask), scaler, sequence_length, num_heads, window); } // local values attending the softmax score. 
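// The three GEMM groups below multiply the softmax probabilities by V, reusing the same
// first row / middle rows / last row split as the QK^T GEMMs above.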
{ // local attention per head - head CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, // m w, // n 2 * w, // k alpha, // alpha v, // A Atype, // A type head_size, // lda stride_per_head, // strideA buffer_pointers[0], // B Btype, // B type static_cast<int>(buffer_strides[0]), // ldb buffer_sizes[0], // strideB beta_0, // beta output, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC batch_size * num_heads, // batch count resultType, algo)); // local attention per head - middle if (middle_count > 0) { for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < num_heads; ++j) { const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch + j * head_size * sequence_length) * element_size; const void* prob_head = reinterpret_cast<const char*>(buffer_pointers[1]) + (i * num_heads + j) * buffer_sizes[1] * element_size; void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + j * head_size * sequence_length + w * head_size) * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, // m w, // n 3 * w, // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda w * head_size, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[1]), // ldb 3 * w * w, // strideB beta_0, // beta out_head, // C Ctype, // C type head_size, // ldc w * head_size, // strideC middle_count, // batch count resultType, algo)); } } } // local attention per head - tail const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size; void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, // m w, // n 2 * w, // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA buffer_pointers[2], // B Btype, // B type static_cast<int>(buffer_strides[2]), // ldb buffer_sizes[2], // strideB beta_0, // beta out_head, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC batch_size * num_heads, // batch count resultType, algo)); } // global attention part for (int i = 0; i < batch_size; ++i) { if (global_count[i] > 0) { // Local tokens attending global tokens const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch) * element_size; const void* prob_head = reinterpret_cast<const char*>(buffer_pointers[3]) + (i * buffer_sizes[3] * num_heads + w * buffer_strides[3]) * element_size; void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + 2 * w * head_size) * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, // m sequence_length - 2 * w, // n global_count[i], // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[3]), // ldb buffer_sizes[3], // strideB beta_1, // beta out_head, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC num_heads, // batch count resultType, algo)); // Global tokens attending everything v_head = reinterpret_cast<const char*>(global_v) + (i * elements_per_batch) * element_size; prob_head = reinterpret_cast<const char*>(buffer_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size; out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch) * element_size; CHECK(hipblasGemmStridedBatchedEx(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, 
// m global_count[i], // n sequence_length, // k: re-write entries completely alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[4]), // ldb buffer_sizes[4], // strideB beta_0, // beta: overwrite out_head, // C: assumes global tokens at the beginning of sequence Ctype, // C type head_size, // ldc stride_per_head, // strideC num_heads, // batch count resultType, algo)); } } return Status::OK(); } template <typename T> Status LongformerQkvToContext( const hipDeviceProp_t& device_prop, hipblasHandle_t cublas, hipStream_t stream, const int batch_size, // batch size const int sequence_length, // sequence length const int num_heads, // number of attention heads const int head_size, // hidden size per head const int window, // Half (one-sided) window size const size_t element_size, const T* input, // input for transpose const T* bias, // bias to add to transposed input const T* attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked. const T* global_input, // global input for transpose const T* global_bias, // bias to add to transposed global input const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global. const int* global_index, // Global index with shape (B, S) const int* batch_global_num, // Number of global tokens per batch with shape (B, 1) const int max_num_global, // Maximum number of global tokens (G) void* pinned_buffer, // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1) T* workspace, // Softmax space T* output, // output size_t softmax_workspace_size, bool disable_compact_memory, bool use_merged_qkv_weights, bool use_half4) { T* qkv = reinterpret_cast<T*>(reinterpret_cast<char*>(workspace) + softmax_workspace_size); // Number of elements in Q, K, V, Global_Q, Global_K or Global_V are same: BxNxSxH const int elements = batch_size * num_heads * sequence_length * head_size; const int max_threads_per_block(device_prop.maxThreadsPerBlock); const int format = static_cast<int>(use_merged_qkv_weights); bool compact_global_q = false; // The order of qkv space: // Q, K, V, Global_K, Global_V, Global_Q (format 0) // Q, K, V, Global_Q, Global_K, Global_V (format 1) if (format == 1 || max_num_global == 0 || nullptr == global_input) { if (bias == nullptr) { ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 3, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, input, qkv)); } else { LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, input, bias, qkv, use_half4); } if (max_num_global > 0 && nullptr != global_input) { if (global_bias == nullptr) { ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 3, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, global_input, qkv + 3 * elements)); } else { LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, global_input, global_bias, qkv + 3 * elements, use_half4); } } } else { LaunchAddBiasTranspose(stream, 5, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, input, bias, qkv, use_half4); compact_global_q = (disable_compact_memory == false); LaunchAddBiasTranspose(stream, 1, format, max_threads_per_block, batch_size, compact_global_q ? 
max_num_global : sequence_length, num_heads, head_size, global_input + 2 * elements, global_bias, qkv + 5 * elements, use_half4); } CUDA_RETURN_IF_ERROR(hipGetLastError()); // Transposed Q, K, V with shape (B, N, S, H) const T* q = qkv; const T* k = q + elements; const T* v = k + elements; // Transposed global Q, K, V with shape (B, N, S, H). // When compact_global_q is true, Global Q has actual shape (B, N, G, H) although we allocated space of (B, N, S, H) // When max_num_global == 0, these pointers are not used in GEMM so the value does not matter. const T* global_q = (format == 1 ? v + elements : qkv + 5 * elements); const T* global_k = (format == 1 ? global_q + elements : qkv + 3 * elements); const T* global_v = (format == 1 ? global_k + elements : qkv + 4 * elements); // Q*K' are scaled by 1/sqrt(H) const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); T* temp_output = qkv; // Q will be overwritten if (disable_compact_memory) { ORT_RETURN_IF_ERROR(LaunchLongformerSoftmaxSimpleKernel( stream, cublas, workspace, q, k, v, attention_mask, global_q, global_k, global_v, global_attention, global_index, batch_global_num, pinned_buffer, temp_output, rsqrt_head_size, batch_size, sequence_length, num_heads, head_size, window, element_size)); } else { ORT_ENFORCE(max_num_global <= window); ORT_RETURN_IF_ERROR(LaunchLongformerSoftmaxKernel( stream, cublas, workspace, q, k, v, attention_mask, max_num_global, compact_global_q, global_q, global_k, global_v, global_attention, global_index, batch_global_num, pinned_buffer, temp_output, rsqrt_head_size, batch_size, sequence_length, num_heads, head_size, window, element_size)); } // The temp_output is BxNxSxH, transpose it to final output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, temp_output, output); } Status LaunchLongformerAttentionKernel( const hipDeviceProp_t& device_prop, hipblasHandle_t cublas, hipStream_t stream, const void* input, const void* bias, const void* attention_mask, const void* global_input, const void* global_bias, const int* global_attention, const int* global_index, const int* batch_global_num, void* pinned_buffer, void* workspace, void* output, int batch_size, int sequence_length, int num_heads, int head_size, int window, int max_num_global, const size_t element_size, bool disable_compact_memory, bool use_merged_qkv_weights, bool use_half4) { CublasMathModeSetter helper(device_prop, cublas, CUBLAS_TENSOR_OP_MATH); size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length, window, disable_compact_memory); if (element_size == 2) { return LongformerQkvToContext(device_prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, window, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(bias), reinterpret_cast<const half*>(attention_mask), reinterpret_cast<const half*>(global_input), reinterpret_cast<const half*>(global_bias), global_attention, global_index, batch_global_num, max_num_global, pinned_buffer, reinterpret_cast<half*>(workspace), reinterpret_cast<half*>(output), softmax_workspace_size, disable_compact_memory, use_merged_qkv_weights, use_half4); } else { return LongformerQkvToContext(device_prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, window, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(bias), reinterpret_cast<const float*>(attention_mask), reinterpret_cast<const 
float*>(global_input), reinterpret_cast<const float*>(global_bias), global_attention, global_index, batch_global_num, max_num_global, pinned_buffer, reinterpret_cast<float*>(workspace), reinterpret_cast<float*>(output), softmax_workspace_size, disable_compact_memory, use_merged_qkv_weights, false); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
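// Illustrative sketch (not part of the generated file above): how the five compact
// scratch1 buffers described in LaunchLongformerSoftmaxKernel partition the
// (5S - 3W) * W elements per head. It assumes sequence_length is a multiple of the
// window size and at least 2 * window, as the block layout above requires.
// CompactScratch1SizesPerHead is a hypothetical helper added only to make the size
// bookkeeping explicit.
#include <cassert>
#include <cstddef>

static void CompactScratch1SizesPerHead(size_t S, size_t W, size_t sizes[5]) {
  const size_t middle_rows = S / W - 2;  // rows that hold 3 WxW blocks each
  sizes[0] = 2 * W * W;                  // first row: 2 WxW blocks
  sizes[1] = 3 * W * W * middle_rows;    // middle rows: 3 WxW blocks per row
  sizes[2] = 2 * W * W;                  // last row: 2 WxW blocks
  sizes[3] = W * (S - W);                // local tokens attending global tokens
  sizes[4] = W * S;                      // global tokens attending everything
  // Matches the comment in the kernel above: (5S - 3W) * W elements per head in total.
  assert(sizes[0] + sizes[1] + sizes[2] + sizes[3] + sizes[4] == (5 * S - 3 * W) * W);
}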
626acd5cb3c9681fce747fb5f56ae251fe57c83c.cu
/* Copyright (c) NVIDIA Corporation and Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Limitations of current Longformer Attention CUDA Kernels: // (1) Does not support global tokens in the middle. All global tokens shall be in the beginning of sequence. // (2) Maximum number of global tokens <= one-sided attention window #include <cub/cub.cuh> #include <cublas_v2.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <math_constants.h> #include <library_types.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "contrib_ops/cuda/bert/add_bias_transpose.h" #include "contrib_ops/cuda/bert/attention_impl.h" #include "contrib_ops/cuda/bert/longformer_attention_softmax.h" #include "contrib_ops/cuda/bert/longformer_attention_impl.h" using namespace onnxruntime::cuda; using namespace cub; #define CHECK(expr) CUBLAS_RETURN_IF_ERROR(expr) #define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr) namespace onnxruntime { namespace contrib { namespace cuda { // Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), maximum global tokens (G) // // Workspace layout (default data type T is float or half): // [SoftmaxSpace] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxSxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH] // where Global_Q, Global_K and Global_V are optional. They are not allocated when there is no global token. // // SoftmaxSpace layout is the following when compact memory is enabled: // [scratch1: (5S-3W)*W*N*B] [scratch2: size_t 15] // Scratch1 has 5 buffers for local and global attention calculation. // Scratch2 has 5 input/output pointers, 5 buffer sizes and 5 strides related to scratch1. // // SoftmaxSpace layout is the following When compact memory is disabled: // [scratch1: BxNxSxS] [scratch2: BxNxSxS] static size_t Align(size_t a) { const size_t alignment = 128; // Align on a 16-byte boundary to avoid "misaligned address" error. 
return CeilDiv(a, alignment) * alignment; } size_t GetScratch1Size(size_t element_size, size_t batch_size, size_t num_heads, size_t sequence_length, size_t window) { size_t bytes = (5 * sequence_length - 3 * window) * window * num_heads * batch_size * element_size; return Align(bytes); } constexpr size_t GetScratch2Size() { return 5 * sizeof(void*) + 10 * sizeof(size_t); } size_t GetLongformerSoftmaxWorkspaceSize( size_t element_size, size_t batch_size, size_t num_heads, size_t sequence_length, size_t window, bool disable_compact_memory) { if (!disable_compact_memory) { size_t scratch1_size = GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window); size_t scratch2_size = GetScratch2Size(); return Align(scratch1_size + scratch2_size); } else { return 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length); } } size_t GetLongformerAttentionWorkspaceSize( size_t element_size, size_t batch_size, size_t num_heads, size_t head_size, size_t sequence_length, size_t max_num_global, size_t window, bool disable_compact_memory) { size_t softmax_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length, window, disable_compact_memory); size_t qkv_size = static_cast<size_t>(3) * batch_size * sequence_length * num_heads * head_size * element_size; size_t global_qkv_size = max_num_global > 0 ? qkv_size : 0; return softmax_size + qkv_size + global_qkv_size; } // Size of buffer of pinned memory in CPU. The buffer is used to copy memory between CPU and GPU. // The buffer includes two parts: [global_count (copy of batch_global_num): int Bx1] [copy of scratch2] size_t GetPinnedBufferSize(size_t batch_size) { return sizeof(int) * batch_size + GetScratch2Size(); } // Softmax kernel for compact format template <typename T, int blockSize> __launch_bounds__(blockSize) __global__ void LongformerSoftmaxKernel(const int* global_attention, const int* global_index, const int* batch_global_num, void* buffer_pointers, const T* attention_mask, float scaler, int sequence_length, int num_heads, int window) { typedef cub::BlockReduce<float, blockSize> BlockReduce; __shared__ typename BlockReduce::TempStorage block_reduce_temp; int tid = threadIdx.x; const int batch_index = blockIdx.x / (sequence_length * num_heads); const int row_index = blockIdx.x % sequence_length; const int head_index = (blockIdx.x / sequence_length) % num_heads; // Adjust the pointers for the batch const T* mask_block = attention_mask + sequence_length * batch_index; const int* global_index_block = global_index + sequence_length * batch_index; const int global_num = batch_global_num[batch_index]; size_t* p_inputs = reinterpret_cast<size_t*>(buffer_pointers); size_t* p_outputs = reinterpret_cast<size_t*>(buffer_pointers); size_t* input_sizes = reinterpret_cast<size_t*>(buffer_pointers) + 5; size_t* input_strides = reinterpret_cast<size_t*>(buffer_pointers) + 10; const T* inputs[5]; T* outputs[5]; for (int i = 0; i < 5; ++i) { inputs[i] = reinterpret_cast<T*>(p_inputs[i]) + batch_index * num_heads * input_sizes[i]; outputs[i] = reinterpret_cast<T*>(p_outputs[i]) + batch_index * num_heads * input_sizes[i]; } // Local attention token int col_start = 0; int col_end = sequence_length; bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == static_cast<int>(0)); if (is_local_row) { col_start = row_index - window; if (col_start < 0) { col_start = 0; } col_end = row_index + window + 1; if (col_end > sequence_length) { col_end = 
sequence_length; } } // If mask is set then set everything to zero to match huggingface transformers implementation if ((float)mask_block[row_index] != 0.f) { if (is_local_row) { T* output_block = nullptr; T* output_global = nullptr; int local_offset = row_index % window; int local_start = 0; int local_end = 3 * window; if (row_index < window) { local_start = 0; local_end = 2 * window; output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < sequence_length - window) { output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { local_start = 0; local_end = 2 * window; output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; } for (int i = local_start + tid; i < local_end; i += blockSize) { output_block[i] = 0; } if ((row_index - 2 * window) >= 0) { output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } if (output_global != nullptr) { for (int i = tid; i < global_num; i += blockSize) { output_global[i] = 0; } } } else { T* output_block = outputs[4]; for (int i = tid; i < sequence_length; i += blockSize) output_block[i] = 0; } return; } float sum_input = 0.; __shared__ float sum_shared; // Calculate max input float max_input = -CUDART_INF_F; __shared__ float max_shared; if (is_local_row) { const T* input_block = nullptr; T* output_block = nullptr; T* output_global = nullptr; int local_offset = row_index % window; int local_start = local_offset; int local_end = local_start + 2 * window + 1; int zero_start = 0; int zero_end = 3 * window; if (row_index < window) { local_start = 0; local_end = local_offset + window + 1; zero_end = 2 * window; input_block = inputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < sequence_length - window) { input_block = inputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { local_start = local_offset; local_end = 2 * window; zero_end = 2 * window; input_block = inputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2]; } const T* input_global = nullptr; int local_global = row_index - window; if (local_global > global_num) { local_global = global_num; } if (local_global > 0) { input_global = inputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } if (row_index < window) { output_global = (T*)outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0]; } else if (row_index < 2 * window) { output_global = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1]; } else { output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3]; } for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = x * scaler + (float)mask_block[j]; if (max_input < x) max_input = x; } if (input_global != nullptr) { for (int i = tid; i < local_global; i += blockSize) { float x = input_global[global_index_block[i]]; x = x * scaler + (float)mask_block[global_index_block[i]]; if (max_input < x) max_input = x; } } float max_block = 
BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max()); if (tid == 0) { max_shared = max_block; } __syncthreads(); for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); sum_input += x; } if (input_global != nullptr) { for (int i = tid, j = col_start + tid; i < local_global; i += blockSize, j += blockSize) { float x = input_global[global_index_block[i]]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); sum_input += x; } } float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum()); if (tid == 0) { sum_shared = sum_block; } __syncthreads(); float recip_sum = 1.f / sum_shared; for (int i = tid + zero_start; i < local_start; i += blockSize) { output_block[i] = (T)(0.); } for (int i = tid + local_end; i < zero_end; i += blockSize) { output_block[i] = (T)(0.); } __syncthreads(); for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[j] - max_shared); output_block[i] = (T)(recip_sum * x); } if (input_global != nullptr) { for (int i = tid; i < local_global; i += blockSize) { float x = input_global[global_index_block[i]]; x = expf((x)*scaler + (float)mask_block[global_index_block[i]] - max_shared); output_global[i] = (T)(recip_sum * x); } } } else { // Global tokens const T* input_block = inputs[4] + row_index * input_strides[4] + head_index * input_sizes[4]; T* output_block = outputs[4] + row_index * input_strides[4] + head_index * input_sizes[4]; for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = x * scaler + (float)mask_block[i]; if (max_input < x) max_input = x; } float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max()); if (tid == 0) { max_shared = max_block; } __syncthreads(); for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[i] - max_shared); sum_input += x; } float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum()); if (tid == 0) { sum_shared = sum_block; } __syncthreads(); float recip_sum = 1.f / sum_shared; for (int i = tid; i < sequence_length; i += blockSize) { float x = input_block[i]; x = expf((x)*scaler + (float)mask_block[i] - max_shared); output_block[i] = (T)(recip_sum * x); } } } Status LaunchLongformerSoftmaxKernel( cudaStream_t stream, cublasHandle_t cublas, void* workspace, const void* q, // transposed Q with shape (B, N, S, H) const void* k, // transposed K with shape (B, N, S, H) const void* v, // transposed V with shape (B, N, S, H) const void* attention_mask, // attention mask with shape (B, S), with value 0 not masked and -10000 masked. int max_num_global, // maximum number of global tokens (G) const bool compact_global_q, // whether global_q has shape (B, N, G, H) instead of (B, N, S, H) const void* global_q, // Q for global tokens with shape (B, N, S, H). const void* global_k, // K for global tokens with shape (B, N, S, H) const void* global_v, // V for global tokens with shape (B, N, S, H) const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global. 
const int* global_index, // Global index with shape (B, S) const int* batch_global_num, // Number of global tokens per batch with shape (B, 1) void* pinned_buffer, // Pinned memory in CPU with 2 parts: global tokens per batch, and data for scratch2 void* output, // output with shape (B, N, S, H) float scaler, // scalar int batch_size, // batch size int sequence_length, // sequence length int num_heads, // number of heads int head_size, // hidden size per head int window, // one sided window size size_t element_size) { // size of element: 2 for half, and 4 for float const int* global_count = reinterpret_cast<const int*>(pinned_buffer); bool is_fp16 = (element_size == 2); char* scratch1 = reinterpret_cast<char*>(workspace); char* scratch2 = scratch1 + GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window); // Setup shared parameters for two strided batched matrix multiplies cudaDataType_t Atype; cudaDataType_t Btype; cudaDataType_t Ctype; cudaDataType_t resultType; cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT; __half one_fp16, zero_fp16; float one_fp32, zero_fp32; void *alpha, *beta_0, *beta_1; if (is_fp16) { one_fp16 = __float2half(1.f); zero_fp16 = __float2half(0.f); alpha = static_cast<void*>(&one_fp16); beta_0 = static_cast<void*>(&zero_fp16); beta_1 = static_cast<void*>(&one_fp16); Atype = CUDA_R_16F; Btype = CUDA_R_16F; Ctype = CUDA_R_16F; resultType = CUDA_R_16F; algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; } else { one_fp32 = 1.f; zero_fp32 = 0.f; alpha = static_cast<void*>(&one_fp32); beta_0 = static_cast<void*>(&zero_fp32); beta_1 = static_cast<void*>(&one_fp32); Atype = CUDA_R_32F; Btype = CUDA_R_32F; Ctype = CUDA_R_32F; resultType = CUDA_R_32F; } // Strided batch matrix multiply // qk = q * k^T // Shapes: q and k = B x N x S x H, qk = B x N x S x S // Convert col-major to row-major by swapping q and k in Gemm size_t elements_per_batch = num_heads * sequence_length * head_size; int stride_per_head = sequence_length * head_size; // stride for Q, K, V and output // Local attention part // S x S is calculated using sliding block WxW (W is one sided window size) like the following: // [W][W] // [W][W][W] // [W][W][W] // [W][W] // The first and last rows have 2 blocks per row, and the remaining has 3 blocks per row. // The calculation are splited into 3 parts: the first row, middle rows and finally the last row. // To save space, we do not store the whole matrix. Instead, we only allocate space for these blocks. 
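// Note: middle_count and last_block below are computed with integer division, so the
// index arithmetic in this function assumes sequence_length is a multiple of the window
// size.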
// // For global attention part, we have two assumptions: // (1) Global tokens are at the beginging of sequence // (2) Number of global tokens <= attention window // // The results are stored in scratch1 buffer: // Number of elements for local attention are (3*S/W-2)*W*W*N*B, or (3S-2W)*W*N*B // Number of elements for local attends to global are (S-W)*W*N*B // Number of elements for global attends to everything are S*W*N*B // Total elements (FP16 or FP32) are (5S-3W)*W*N*B const int w = window; const int middle_count = (sequence_length - 2 * w) / w; int last_block = (sequence_length / w) - 1; // Determine the non-zero block dimensions and pointers // Buffer size per head for a single batch size_t buffer_sizes[5] = { static_cast<size_t>(w * w * 2), // first row of blocks has 2 WxW blocks static_cast<size_t>(w * w * middle_count * 3), // middle rows of blocks have 3 WxW blocks per row static_cast<size_t>(w * w * 2), // last row of blocks has 2 WxW blocks static_cast<size_t>(w * (sequence_length - w)), // local attends to global: global tokens <= window size static_cast<size_t>(w * sequence_length)}; // global attends to everything. size_t buffer_strides[5] = { static_cast<size_t>(w * 2), static_cast<size_t>(w * 3), static_cast<size_t>(w * 2), static_cast<size_t>(w), // number of global tokens <= window size static_cast<size_t>(sequence_length)}; void* buffer_pointers[5]; char* current_pointer = scratch1; for (int i = 0; i < 5; ++i) { buffer_pointers[i] = reinterpret_cast<void*>(current_pointer); current_pointer += buffer_sizes[i] * num_heads * batch_size * element_size; } // Copy to a continues buffer first so that we only need call cudaMemcpyAsync once char* temp_buffer = reinterpret_cast<char*>(pinned_buffer) + sizeof(int) * batch_size; memcpy(temp_buffer, &buffer_pointers[0], 5 * sizeof(void*)); memcpy(temp_buffer + 5 * sizeof(void*), &buffer_sizes[0], 5 * sizeof(size_t)); memcpy(temp_buffer + 5 * sizeof(void*) + 5 * sizeof(size_t), &buffer_strides[0], 5 * sizeof(size_t)); CHECK_CUDA(cudaMemcpyAsync(scratch2, temp_buffer, GetScratch2Size(), cudaMemcpyHostToDevice, stream)); // Local attention part { // local attention per head - head CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N, 2 * w, // m w, // n head_size, // k alpha, // alpha k, // A Atype, // A type head_size, // lda stride_per_head, // strideA q, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta buffer_pointers[0], // C Ctype, // C type 2 * w, // ldc buffer_sizes[0], // strideC batch_size * num_heads, // batch count resultType, algo)); // local attention per head - middle if (middle_count > 0) { for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < num_heads; ++j) { const void* q_head = reinterpret_cast<const char*>(q) + (i * elements_per_batch + (j * sequence_length + w) * head_size) * element_size; const void* k_head = reinterpret_cast<const char*>(k) + (i * elements_per_batch + j * sequence_length * head_size) * element_size; void* qk_head = reinterpret_cast<char*>(buffer_pointers[1]) + static_cast<size_t>(i * num_heads + j) * buffer_sizes[1] * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N, 3 * w, // m w, // n head_size, // k alpha, // alpha k_head, // A Atype, // A type head_size, // lda w * head_size, // strideA q_head, // B Btype, // B type head_size, // ldb w * head_size, // strideB beta_0, // beta qk_head, // C Ctype, // C type 3 * w, // ldc 3 * w * w, // strideC middle_count, // batch count resultType, algo)); } } } // local 
attention per head - tail const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size; const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N, 2 * w, // m w, // n head_size, // k alpha, // alpha k_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA q_head, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta buffer_pointers[2], // C Ctype, // C type 2 * w, // ldc buffer_sizes[2], // strideC batch_size * num_heads, // batch count resultType, algo)); } // Global attention part for (int i = 0; i < batch_size; ++i) { if (global_count[i] > 0) { const void* q_batch = reinterpret_cast<const char*>(q) + (i * elements_per_batch + w * head_size) * element_size; const void* k_batch = reinterpret_cast<const char*>(k) + (i * elements_per_batch) * element_size; void* qk_batch = reinterpret_cast<char*>(buffer_pointers[3]) + (i * buffer_sizes[3]) * num_heads * element_size; // Local tokens attending global tokens CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N, global_count[i], // m sequence_length - w, // n head_size, // k alpha, // alpha k_batch, // A Atype, // A type head_size, // lda stride_per_head, // strideA q_batch, // B Btype, // B type head_size, // ldb stride_per_head, // strideB beta_0, // beta qk_batch, // C Ctype, // C type w, // ldc buffer_sizes[3], // strideC num_heads, // batch count resultType, algo)); const size_t global_q_per_batch = compact_global_q ? num_heads * max_num_global * head_size : elements_per_batch; const int global_q_stride = (compact_global_q ? max_num_global * head_size : stride_per_head); const void* global_q_batch = reinterpret_cast<const char*>(global_q) + (i * global_q_per_batch) * element_size; const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * elements_per_batch) * element_size; qk_batch = reinterpret_cast<char*>(buffer_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size; // Global tokens attending everything // This GEMMs need to be last to make sure all global token entries are re-written. CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, CUBLAS_OP_N, sequence_length, // m global_count[i], // n head_size, // k alpha, // alpha global_k_batch, // A Atype, // A type head_size, // lda stride_per_head, // strideA global_q_batch, // B Btype, // B type head_size, // ldb global_q_stride, // strideB. beta_0, // beta qk_batch, // C Ctype, // C type sequence_length, // ldc buffer_sizes[4], // strideC num_heads, // batch count resultType, algo)); } } const int blockSize = 64; const int gridSize = batch_size * num_heads * sequence_length; if (is_fp16) { LongformerSoftmaxKernel<__half, blockSize><<<gridSize, blockSize, 0, stream>>>( global_attention, global_index, batch_global_num, scratch2, static_cast<const __half*>(attention_mask), scaler, sequence_length, num_heads, window); } else { LongformerSoftmaxKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>( global_attention, global_index, batch_global_num, scratch2, static_cast<const float*>(attention_mask), scaler, sequence_length, num_heads, window); } // local values attending the softmax score. 
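// The probability-times-V GEMMs below use the same first/middle/last row split as the
// QK^T part above; the later local-to-global GEMM accumulates into the output with
// beta = 1, and the global-token rows are rewritten last with beta = 0.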
{ // local attention per head - head CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, // m w, // n 2 * w, // k alpha, // alpha v, // A Atype, // A type head_size, // lda stride_per_head, // strideA buffer_pointers[0], // B Btype, // B type static_cast<int>(buffer_strides[0]), // ldb buffer_sizes[0], // strideB beta_0, // beta output, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC batch_size * num_heads, // batch count resultType, algo)); // local attention per head - middle if (middle_count > 0) { for (int i = 0; i < batch_size; ++i) { for (int j = 0; j < num_heads; ++j) { const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch + j * head_size * sequence_length) * element_size; const void* prob_head = reinterpret_cast<const char*>(buffer_pointers[1]) + (i * num_heads + j) * buffer_sizes[1] * element_size; void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + j * head_size * sequence_length + w * head_size) * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, // m w, // n 3 * w, // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda w * head_size, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[1]), // ldb 3 * w * w, // strideB beta_0, // beta out_head, // C Ctype, // C type head_size, // ldc w * head_size, // strideC middle_count, // batch count resultType, algo)); } } } // local attention per head - tail const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size; void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, // m w, // n 2 * w, // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA buffer_pointers[2], // B Btype, // B type static_cast<int>(buffer_strides[2]), // ldb buffer_sizes[2], // strideB beta_0, // beta out_head, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC batch_size * num_heads, // batch count resultType, algo)); } // global attention part for (int i = 0; i < batch_size; ++i) { if (global_count[i] > 0) { // Local tokens attending global tokens const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch) * element_size; const void* prob_head = reinterpret_cast<const char*>(buffer_pointers[3]) + (i * buffer_sizes[3] * num_heads + w * buffer_strides[3]) * element_size; void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + 2 * w * head_size) * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, // m sequence_length - 2 * w, // n global_count[i], // k alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[3]), // ldb buffer_sizes[3], // strideB beta_1, // beta out_head, // C Ctype, // C type head_size, // ldc stride_per_head, // strideC num_heads, // batch count resultType, algo)); // Global tokens attending everything v_head = reinterpret_cast<const char*>(global_v) + (i * elements_per_batch) * element_size; prob_head = reinterpret_cast<const char*>(buffer_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size; out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch) * element_size; CHECK(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, // m 
global_count[i], // n sequence_length, // k: re-write entries completely alpha, // alpha v_head, // A Atype, // A type head_size, // lda stride_per_head, // strideA prob_head, // B Btype, // B type static_cast<int>(buffer_strides[4]), // ldb buffer_sizes[4], // strideB beta_0, // beta: overwrite out_head, // C: assumes global tokens at the beginning of sequence Ctype, // C type head_size, // ldc stride_per_head, // strideC num_heads, // batch count resultType, algo)); } } return Status::OK(); } template <typename T> Status LongformerQkvToContext( const cudaDeviceProp& device_prop, cublasHandle_t cublas, cudaStream_t stream, const int batch_size, // batch size const int sequence_length, // sequence length const int num_heads, // number of attention heads const int head_size, // hidden size per head const int window, // Half (one-sided) window size const size_t element_size, const T* input, // input for transpose const T* bias, // bias to add to transposed input const T* attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked. const T* global_input, // global input for transpose const T* global_bias, // bias to add to transposed global input const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global. const int* global_index, // Global index with shape (B, S) const int* batch_global_num, // Number of global tokens per batch with shape (B, 1) const int max_num_global, // Maximum number of global tokens (G) void* pinned_buffer, // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1) T* workspace, // Softmax space T* output, // output size_t softmax_workspace_size, bool disable_compact_memory, bool use_merged_qkv_weights, bool use_half4) { T* qkv = reinterpret_cast<T*>(reinterpret_cast<char*>(workspace) + softmax_workspace_size); // Number of elements in Q, K, V, Global_Q, Global_K or Global_V are same: BxNxSxH const int elements = batch_size * num_heads * sequence_length * head_size; const int max_threads_per_block(device_prop.maxThreadsPerBlock); const int format = static_cast<int>(use_merged_qkv_weights); bool compact_global_q = false; // The order of qkv space: // Q, K, V, Global_K, Global_V, Global_Q (format 0) // Q, K, V, Global_Q, Global_K, Global_V (format 1) if (format == 1 || max_num_global == 0 || nullptr == global_input) { if (bias == nullptr) { ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 3, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, input, qkv)); } else { LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, input, bias, qkv, use_half4); } if (max_num_global > 0 && nullptr != global_input) { if (global_bias == nullptr) { ORT_RETURN_IF_ERROR(LaunchTransQkv(stream, 3, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, global_input, qkv + 3 * elements)); } else { LaunchAddBiasTranspose(stream, 3, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, global_input, global_bias, qkv + 3 * elements, use_half4); } } } else { LaunchAddBiasTranspose(stream, 5, format, max_threads_per_block, batch_size, sequence_length, num_heads, head_size, input, bias, qkv, use_half4); compact_global_q = (disable_compact_memory == false); LaunchAddBiasTranspose(stream, 1, format, max_threads_per_block, batch_size, compact_global_q ? 
max_num_global : sequence_length, num_heads, head_size, global_input + 2 * elements, global_bias, qkv + 5 * elements, use_half4); } CUDA_RETURN_IF_ERROR(cudaGetLastError()); // Transposed Q, K, V with shape (B, N, S, H) const T* q = qkv; const T* k = q + elements; const T* v = k + elements; // Transposed global Q, K, V with shape (B, N, S, H). // When compact_global_q is true, Global Q has actual shape (B, N, G, H) although we allocated space of (B, N, S, H) // When max_num_global == 0, these pointers are not used in GEMM so the value does not matter. const T* global_q = (format == 1 ? v + elements : qkv + 5 * elements); const T* global_k = (format == 1 ? global_q + elements : qkv + 3 * elements); const T* global_v = (format == 1 ? global_k + elements : qkv + 4 * elements); // Q*K' are scaled by 1/sqrt(H) const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); T* temp_output = qkv; // Q will be overwritten if (disable_compact_memory) { ORT_RETURN_IF_ERROR(LaunchLongformerSoftmaxSimpleKernel( stream, cublas, workspace, q, k, v, attention_mask, global_q, global_k, global_v, global_attention, global_index, batch_global_num, pinned_buffer, temp_output, rsqrt_head_size, batch_size, sequence_length, num_heads, head_size, window, element_size)); } else { ORT_ENFORCE(max_num_global <= window); ORT_RETURN_IF_ERROR(LaunchLongformerSoftmaxKernel( stream, cublas, workspace, q, k, v, attention_mask, max_num_global, compact_global_q, global_q, global_k, global_v, global_attention, global_index, batch_global_num, pinned_buffer, temp_output, rsqrt_head_size, batch_size, sequence_length, num_heads, head_size, window, element_size)); } // The temp_output is BxNxSxH, transpose it to final output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, false, temp_output, output); } Status LaunchLongformerAttentionKernel( const cudaDeviceProp& device_prop, cublasHandle_t cublas, cudaStream_t stream, const void* input, const void* bias, const void* attention_mask, const void* global_input, const void* global_bias, const int* global_attention, const int* global_index, const int* batch_global_num, void* pinned_buffer, void* workspace, void* output, int batch_size, int sequence_length, int num_heads, int head_size, int window, int max_num_global, const size_t element_size, bool disable_compact_memory, bool use_merged_qkv_weights, bool use_half4) { CublasMathModeSetter helper(device_prop, cublas, CUBLAS_TENSOR_OP_MATH); size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size, batch_size, num_heads, sequence_length, window, disable_compact_memory); if (element_size == 2) { return LongformerQkvToContext(device_prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, window, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(bias), reinterpret_cast<const half*>(attention_mask), reinterpret_cast<const half*>(global_input), reinterpret_cast<const half*>(global_bias), global_attention, global_index, batch_global_num, max_num_global, pinned_buffer, reinterpret_cast<half*>(workspace), reinterpret_cast<half*>(output), softmax_workspace_size, disable_compact_memory, use_merged_qkv_weights, use_half4); } else { return LongformerQkvToContext(device_prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, window, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(bias), reinterpret_cast<const float*>(attention_mask), reinterpret_cast<const 
float*>(global_input), reinterpret_cast<const float*>(global_bias), global_attention, global_index, batch_global_num, max_num_global, pinned_buffer, reinterpret_cast<float*>(workspace), reinterpret_cast<float*>(output), softmax_workspace_size, disable_compact_memory, use_merged_qkv_weights, false); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
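// Illustrative sketch (not part of the generated file above): the column-major /
// row-major swap used by the QK^T GEMMs in LaunchLongformerSoftmaxKernel, reduced to
// plain FP32 and a single strided-batched call per (batch * head) matrix. BatchedQKT is
// a hypothetical helper; q and k are assumed row-major and contiguous with shape
// (batch, S, H), and scores come out row-major with shape (batch, S, S). Unlike the
// compact banded GEMMs above, this sketch computes the full S x S score matrix.
#include <cublas_v2.h>

static cublasStatus_t BatchedQKT(cublasHandle_t handle, const float* q, const float* k,
                                 float* scores, int batch, int S, int H) {
  const float one = 1.f, zero = 0.f;
  // In cuBLAS's column-major view, passing K as op(A) = T and Q as op(B) = N produces a
  // buffer that, read back row-major, equals Q * K^T -- the same q/k swap as above.
  return cublasSgemmStridedBatched(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                                   S, S, H,                      // m, n, k
                                   &one,
                                   k, H, (long long)S * H,       // A = K, lda = H
                                   q, H, (long long)S * H,       // B = Q, ldb = H
                                   &zero,
                                   scores, S, (long long)S * S,  // C = scores, ldc = S
                                   batch);
}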
b630839c7033b4cb060728d20c1f60b625d31812.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <math.h>

__global__ void zad3(float *a, float *b, float *c, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N)
    c[idx] = a[idx] + b[idx];
}

int main(void) {
  clock_t t1, t2;
  float *a_h, *b_h, *c_h;
  float *a_d, *b_d, *c_d;
  const int N = 50000000;
  hipEvent_t start1, start2, start3, start4, start5, stop1, stop2, stop3, stop4, stop5;
  float time1, time2, time3, time4, time5;
  hipEventCreate(&start1);
  hipEventCreate(&start2);
  hipEventCreate(&stop1);
  hipEventCreate(&stop2);
  hipEventCreate(&stop3);
  hipEventCreate(&stop4);
  hipEventCreate(&stop5);
  hipEventCreate(&start3);
  hipEventCreate(&start4);
  hipEventCreate(&start5);
  size_t size = N * sizeof(float);
  t1 = clock();
  hipHostMalloc((void**)&a_h, size);  // allocate on the host (pinned memory)
  hipHostMalloc((void**)&b_h, size);
  hipHostMalloc((void**)&c_h, size);
  hipMalloc((void **)&a_d, size);     // allocate memory on the device
  hipMalloc((void **)&b_d, size);
  hipMalloc((void **)&c_d, size);
  t2 = clock();
  for (int i = 0; i < N; i++) {
    a_h[i] = (float)(i + 1);
    b_h[i] = (float)(i + 1);
    c_h[i] = (float)(i + 1);
  }
  hipStream_t strumien1, strumien2;
  hipStreamCreate(&strumien1);
  hipStreamCreate(&strumien2);
  // Valid launch configuration for N/2 elements (the original N/2/N+1 blocks of N
  // threads exceeded the maximum threads per block).
  const int threads = 256;
  const int blocks = (N / 2 + threads - 1) / threads;
  hipEventRecord(start3, 0);
  hipMemcpyAsync(a_d, a_h, size / 2, hipMemcpyHostToDevice, strumien1);
  hipMemcpyAsync(b_d, b_h, size / 2, hipMemcpyHostToDevice, strumien1);
  hipMemcpyAsync(c_d, c_h, size / 2, hipMemcpyHostToDevice, strumien1);
  hipEventRecord(stop3, 0);
  hipEventSynchronize(stop3);
  hipEventRecord(start1, 0);
  hipLaunchKernelGGL(zad3, dim3(blocks), dim3(threads), 0, strumien1, a_d, b_d, c_d, N / 2);
  hipEventRecord(stop1, 0);
  hipEventSynchronize(stop1);
  hipEventRecord(start4, 0);
  hipMemcpyAsync(a_h, a_d, size / 2, hipMemcpyDeviceToHost, strumien1);
  hipMemcpyAsync(b_h, b_d, size / 2, hipMemcpyDeviceToHost, strumien1);
  hipMemcpyAsync(c_h, c_d, size / 2, hipMemcpyDeviceToHost, strumien1);
  hipMemcpyAsync(a_d + N / 2, a_h + N / 2, size / 2, hipMemcpyHostToDevice, strumien2);
  hipMemcpyAsync(b_d + N / 2, b_h + N / 2, size / 2, hipMemcpyHostToDevice, strumien2);
  hipMemcpyAsync(c_d + N / 2, c_h + N / 2, size / 2, hipMemcpyHostToDevice, strumien2);
  hipEventRecord(stop4, 0);
  hipEventSynchronize(stop4);
  hipEventRecord(start2, 0);
  hipLaunchKernelGGL(zad3, dim3(blocks), dim3(threads), 0, strumien2, a_d + N / 2, b_d + N / 2, c_d + N / 2, N / 2);
  hipEventRecord(stop2, 0);
  hipEventSynchronize(stop2);
  hipEventRecord(start5, 0);
  hipMemcpyAsync(a_h + N / 2, a_d + N / 2, size / 2, hipMemcpyDeviceToHost, strumien2);
  hipMemcpyAsync(b_h + N / 2, b_d + N / 2, size / 2, hipMemcpyDeviceToHost, strumien2);
  hipMemcpyAsync(c_h + N / 2, c_d + N / 2, size / 2, hipMemcpyDeviceToHost, strumien2);
  hipEventRecord(stop5, 0);
  hipEventSynchronize(stop5);
  printf("Data allocation time: %f s\n", (float)(t2 - t1) / CLOCKS_PER_SEC);
  // printf("Data transfer time: %f s\n", (float)(((t4-t3)+(t6-t5)+(t8-t7))) / CLOCKS_PER_SEC);
  hipStreamDestroy(strumien1);
  hipStreamDestroy(strumien2);
  hipHostFree(a_h);
  hipHostFree(b_h);
  hipHostFree(c_h);
  hipFree(a_d);
  hipFree(b_d);
  hipFree(c_d);
  hipEventElapsedTime(&time1, start1, stop1);
  hipEventElapsedTime(&time2, start2, stop2);
  hipEventElapsedTime(&time3, start3, stop3);
  hipEventElapsedTime(&time4, start4, stop4);
  hipEventElapsedTime(&time5, start5, stop5);
  printf("Vector addition execution time: %f ms\n", time1 + time2);
  printf("Data transfer time: %f ms\n", time3 + time4 + time5);
}
b630839c7033b4cb060728d20c1f60b625d31812.cu
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>

__global__ void zad3(float *a, float *b, float *c, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N)
    c[idx] = a[idx] + b[idx];
}

int main(void) {
  clock_t t1, t2;
  float *a_h, *b_h, *c_h;
  float *a_d, *b_d, *c_d;
  const int N = 50000000;
  cudaEvent_t start1, start2, start3, start4, start5, stop1, stop2, stop3, stop4, stop5;
  float time1, time2, time3, time4, time5;
  cudaEventCreate(&start1);
  cudaEventCreate(&start2);
  cudaEventCreate(&stop1);
  cudaEventCreate(&stop2);
  cudaEventCreate(&stop3);
  cudaEventCreate(&stop4);
  cudaEventCreate(&stop5);
  cudaEventCreate(&start3);
  cudaEventCreate(&start4);
  cudaEventCreate(&start5);
  size_t size = N * sizeof(float);
  t1 = clock();
  cudaMallocHost((void**)&a_h, size);  // allocate on the host (pinned memory)
  cudaMallocHost((void**)&b_h, size);
  cudaMallocHost((void**)&c_h, size);
  cudaMalloc((void **)&a_d, size);     // allocate memory on the device
  cudaMalloc((void **)&b_d, size);
  cudaMalloc((void **)&c_d, size);
  t2 = clock();
  for (int i = 0; i < N; i++) {
    a_h[i] = (float)(i + 1);
    b_h[i] = (float)(i + 1);
    c_h[i] = (float)(i + 1);
  }
  cudaStream_t strumien1, strumien2;
  cudaStreamCreate(&strumien1);
  cudaStreamCreate(&strumien2);
  // Valid launch configuration for N/2 elements (the original N/2/N+1 blocks of N
  // threads exceeded the maximum threads per block).
  const int threads = 256;
  const int blocks = (N / 2 + threads - 1) / threads;
  cudaEventRecord(start3, 0);
  cudaMemcpyAsync(a_d, a_h, size / 2, cudaMemcpyHostToDevice, strumien1);
  cudaMemcpyAsync(b_d, b_h, size / 2, cudaMemcpyHostToDevice, strumien1);
  cudaMemcpyAsync(c_d, c_h, size / 2, cudaMemcpyHostToDevice, strumien1);
  cudaEventRecord(stop3, 0);
  cudaEventSynchronize(stop3);
  cudaEventRecord(start1, 0);
  zad3<<<blocks, threads, 0, strumien1>>>(a_d, b_d, c_d, N / 2);
  cudaEventRecord(stop1, 0);
  cudaEventSynchronize(stop1);
  cudaEventRecord(start4, 0);
  cudaMemcpyAsync(a_h, a_d, size / 2, cudaMemcpyDeviceToHost, strumien1);
  cudaMemcpyAsync(b_h, b_d, size / 2, cudaMemcpyDeviceToHost, strumien1);
  cudaMemcpyAsync(c_h, c_d, size / 2, cudaMemcpyDeviceToHost, strumien1);
  cudaMemcpyAsync(a_d + N / 2, a_h + N / 2, size / 2, cudaMemcpyHostToDevice, strumien2);
  cudaMemcpyAsync(b_d + N / 2, b_h + N / 2, size / 2, cudaMemcpyHostToDevice, strumien2);
  cudaMemcpyAsync(c_d + N / 2, c_h + N / 2, size / 2, cudaMemcpyHostToDevice, strumien2);
  cudaEventRecord(stop4, 0);
  cudaEventSynchronize(stop4);
  cudaEventRecord(start2, 0);
  zad3<<<blocks, threads, 0, strumien2>>>(a_d + N / 2, b_d + N / 2, c_d + N / 2, N / 2);
  cudaEventRecord(stop2, 0);
  cudaEventSynchronize(stop2);
  cudaEventRecord(start5, 0);
  cudaMemcpyAsync(a_h + N / 2, a_d + N / 2, size / 2, cudaMemcpyDeviceToHost, strumien2);
  cudaMemcpyAsync(b_h + N / 2, b_d + N / 2, size / 2, cudaMemcpyDeviceToHost, strumien2);
  cudaMemcpyAsync(c_h + N / 2, c_d + N / 2, size / 2, cudaMemcpyDeviceToHost, strumien2);
  cudaEventRecord(stop5, 0);
  cudaEventSynchronize(stop5);
  printf("Data allocation time: %f s\n", (float)(t2 - t1) / CLOCKS_PER_SEC);
  // printf("Data transfer time: %f s\n", (float)(((t4-t3)+(t6-t5)+(t8-t7))) / CLOCKS_PER_SEC);
  cudaStreamDestroy(strumien1);
  cudaStreamDestroy(strumien2);
  cudaFreeHost(a_h);
  cudaFreeHost(b_h);
  cudaFreeHost(c_h);
  cudaFree(a_d);
  cudaFree(b_d);
  cudaFree(c_d);
  cudaEventElapsedTime(&time1, start1, stop1);
  cudaEventElapsedTime(&time2, start2, stop2);
  cudaEventElapsedTime(&time3, start3, stop3);
  cudaEventElapsedTime(&time4, start4, stop4);
  cudaEventElapsedTime(&time5, start5, stop5);
  printf("Vector addition execution time: %f ms\n", time1 + time2);
  printf("Data transfer time: %f ms\n", time3 + time4 + time5);
}
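// A minimal sketch (not part of the original exercise) of the overlap pattern the two
// streams above are set up for: issue the H2D copies, the kernel, and the D2H copy of
// each half on its own stream and synchronize only once at the end, so the transfers of
// one half can overlap with the kernel of the other half. AddHalves is a hypothetical
// helper; it reuses the zad3 kernel defined above and expects pinned host buffers and
// two already-created streams. Only a and b are copied in and only c copied back, since
// zad3 never reads c.
#include <cuda_runtime.h>

void AddHalves(float* a_d, float* b_d, float* c_d,
               float* a_h, float* b_h, float* c_h,
               int N, cudaStream_t s[2]) {
  const int half = N / 2;
  const size_t bytes = half * sizeof(float);
  const int threads = 256;
  const int blocks = (half + threads - 1) / threads;
  for (int i = 0; i < 2; ++i) {
    const int off = i * half;
    cudaMemcpyAsync(a_d + off, a_h + off, bytes, cudaMemcpyHostToDevice, s[i]);
    cudaMemcpyAsync(b_d + off, b_h + off, bytes, cudaMemcpyHostToDevice, s[i]);
    zad3<<<blocks, threads, 0, s[i]>>>(a_d + off, b_d + off, c_d + off, half);
    cudaMemcpyAsync(c_h + off, c_d + off, bytes, cudaMemcpyDeviceToHost, s[i]);
  }
  cudaDeviceSynchronize();  // wait for both streams before using c_h
}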
601f1368b20d36db746957ec23346396ba86376c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; //const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxWeightedLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss,const float* loss_weights, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; //const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN)))*loss_weights[label_value]; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; //LOG(INFO)<<"has_ignore_label_ = " <<has_ignore_label_; // const float* lossWeights =NULL; // if(has_sample_selector_) // lossWeights = sample_selector_->Get_Label_prob_gpu_data(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // if(has_sample_selector_){ // // NOLINT_NEXT_LINE(whitespace/operators) // SoftmaxWeightedLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), // CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,lossWeights, // outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); // }else{ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); ///} Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxWeightedLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts,const float* lossWeights) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; // const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s]-=1; //size_t idx= n * dim + label_value * spatial_dim + s; //Dtype curent_diff = bottom_diff[idx]; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s]*=static_cast<Dtype> (lossWeights[label_value]); } counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { /* if(has_sample_selector_){ Backward_cpu(top, propagate_down,bottom); return; } */ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); 
const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; const float* lossWeights ; if(has_sample_selector_){ if (this->layer_param_.label_select_param().auto_balance()) sample_selector_->Compute_label_prob_fromBlob(bottom[1]); lossWeights = sample_selector_->Get_Label_prob_gpu_data(); } const float*lossWeights_cpu =sample_selector_->Get_Label_prob_cpu_data(); if(rand()%200==0) LOG(INFO)<<"weight loss 0 ="<<lossWeights_cpu[0]<<" weight loss 1 ="<<lossWeights_cpu[1]<<" weight loss 2 ="<<lossWeights_cpu[2]; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); if(has_sample_selector_){ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxWeightedLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,lossWeights ); } else{ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
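The backward kernels in the listing implement the usual softmax cross-entropy gradient, which is why Backward_gpu first copies prob_ into bottom_diff and then only subtracts 1 at the channel of the ground-truth label. For clarity (this derivation is not in the source):

\[ \frac{\partial}{\partial z_c}\bigl(-\log p_y\bigr) = p_c - \mathbf{1}[c = y], \qquad p_c = \frac{e^{z_c}}{\sum_k e^{z_k}} . \]

In SoftmaxWeightedLossBackwardGPU every channel of that per-position gradient is additionally scaled by lossWeights[label_value], and the final caffe_gpu_scal call multiplies the whole buffer by top[0]->cpu_diff()[0] / get_normalizer(...), matching the normalization applied in the forward pass.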
601f1368b20d36db746957ec23346396ba86376c.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; //const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxWeightedLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss,const float* loss_weights, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; //const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN)))*loss_weights[label_value]; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; //LOG(INFO)<<"has_ignore_label_ = " <<has_ignore_label_; // const float* lossWeights =NULL; // if(has_sample_selector_) // lossWeights = sample_selector_->Get_Label_prob_gpu_data(); // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // if(has_sample_selector_){ // // NOLINT_NEXT_LINE(whitespace/operators) // SoftmaxWeightedLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), // CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,lossWeights, // outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); // }else{ // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); ///} Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxWeightedLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts,const float* lossWeights) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; // const int label_value = static_cast<int>(label[n * spatial_dim + s]); int label_value = static_cast<int>(label[n * spatial_dim + s]); if(label_value==2) label_value=0; if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s]-=1; //size_t idx= n * dim + label_value * spatial_dim + s; //Dtype curent_diff = bottom_diff[idx]; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s]*=static_cast<Dtype> (lossWeights[label_value]); } counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { /* if(has_sample_selector_){ Backward_cpu(top, propagate_down,bottom); return; } */ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; const float* lossWeights ; if(has_sample_selector_){ if (this->layer_param_.label_select_param().auto_balance()) sample_selector_->Compute_label_prob_fromBlob(bottom[1]); lossWeights = sample_selector_->Get_Label_prob_gpu_data(); } const float*lossWeights_cpu =sample_selector_->Get_Label_prob_cpu_data(); if(rand()%200==0) LOG(INFO)<<"weight loss 0 ="<<lossWeights_cpu[0]<<" weight loss 1 ="<<lossWeights_cpu[1]<<" weight loss 2 ="<<lossWeights_cpu[2]; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); if(has_sample_selector_){ // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxWeightedLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,lossWeights ); } else{ // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
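Both forward kernels above iterate with CUDA_KERNEL_LOOP, which in Caffe expands to a grid-stride loop, so the grid launched with CAFFE_GET_BLOCKS(nthreads) and CAFFE_CUDA_NUM_THREADS covers any nthreads. Below is a minimal standalone sketch of that pattern applied to the per-position weighted negative log-likelihood computed above; the kernel and parameter names are illustrative, not Caffe's, and the ignore-label branch is omitted.

#include <cfloat>          // FLT_MIN
#include <cuda_runtime.h>

// Hedged sketch of the grid-stride pattern behind CUDA_KERNEL_LOOP.
// prob is laid out as [num][channels][spatial], matching the kernels above (dim = channels * spatial_dim).
__global__ void weighted_nll(int nthreads, const float *prob, const float *label,
                             const float *class_weight, int dim, int spatial_dim,
                             float *loss, float *counts) {
    for (int index = blockIdx.x * blockDim.x + threadIdx.x;
         index < nthreads;
         index += blockDim.x * gridDim.x) {             // grid-stride: reused threads cover all indices
        const int n = index / spatial_dim;               // sample in the batch
        const int s = index % spatial_dim;               // spatial position
        int label_value = static_cast<int>(label[n * spatial_dim + s]);
        if (label_value == 2) label_value = 0;           // same label remap as the listing
        const float p = fmaxf(prob[n * dim + label_value * spatial_dim + s], FLT_MIN);
        loss[index]   = -logf(p) * class_weight[label_value];
        counts[index] = 1.0f;
    }
}

// A launch such as weighted_nll<<<(nthreads + 511) / 512, 512>>>(...) suffices;
// the loop keeps working even if the grid is capped below one thread per index.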
1bf312a8ad9ea583ff5b9c3a18dc18deb3e86e0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ------------ * This code is provided solely for the personal and private use of * students taking the CSC367H1 course at the University of Toronto. * Copying for purposes other than this use is expressly prohibited. * All forms of distribution of this code, whether as given or with * any changes, are expressly prohibited. * * Authors: Bogdan Simion, Felipe de Azevedo Piovezan * * All of the files in this directory and all subdirectories are: * Copyright (c) 2017 Bogdan Simion * ------------- */ #include "kernels.h" __global__ void kernel1(const int8_t *filter, int32_t dimension, const int32_t *input, int32_t *output, int32_t width, int32_t height) { int index = blockIdx.x * blockDim.x + threadIdx.x; int row = index % height; int column = index / height; int image_index = row * width + column; if (index < width * height) { output[image_index] = apply2d_gpu(filter, dimension, input, output, width, height, row, column); } } __global__ void normalize1(int32_t *image, int32_t width, int32_t height, int32_t smallest, int32_t biggest) { int index = blockIdx.x * blockDim.x + threadIdx.x; int row = index % height; int column = index / height; int image_index = row * width + column; if (index < width * height && biggest != smallest) { image[image_index] = ((image[image_index] - smallest) * 255) / (biggest - smallest); } }
1bf312a8ad9ea583ff5b9c3a18dc18deb3e86e0e.cu
/* ------------ * This code is provided solely for the personal and private use of * students taking the CSC367H1 course at the University of Toronto. * Copying for purposes other than this use is expressly prohibited. * All forms of distribution of this code, whether as given or with * any changes, are expressly prohibited. * * Authors: Bogdan Simion, Felipe de Azevedo Piovezan * * All of the files in this directory and all subdirectories are: * Copyright (c) 2017 Bogdan Simion * ------------- */ #include "kernels.h" __global__ void kernel1(const int8_t *filter, int32_t dimension, const int32_t *input, int32_t *output, int32_t width, int32_t height) { int index = blockIdx.x * blockDim.x + threadIdx.x; int row = index % height; int column = index / height; int image_index = row * width + column; if (index < width * height) { output[image_index] = apply2d_gpu(filter, dimension, input, output, width, height, row, column); } } __global__ void normalize1(int32_t *image, int32_t width, int32_t height, int32_t smallest, int32_t biggest) { int index = blockIdx.x * blockDim.x + threadIdx.x; int row = index % height; int column = index / height; int image_index = row * width + column; if (index < width * height && biggest != smallest) { image[image_index] = ((image[image_index] - smallest) * 255) / (biggest - smallest); } }
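kernel1 and normalize1 above map the flat thread index to pixels with row = index % height and column = index / height, so a one-dimensional grid of width*height threads covers the whole image. The following is a hedged sketch of the host-side driver such kernels need, assuming the included kernels.h declares them; the helper name run_filter, the 1024-thread block size, and the host-side min/max pass are assumptions, not part of the course handout.

#include <cstdint>
#include <cuda_runtime.h>
#include "kernels.h"   // assumed to declare kernel1, normalize1 and apply2d_gpu

// Hedged driver: filter the image, find its value range, then normalize to [0, 255].
void run_filter(const int8_t *filter_d, int32_t dimension,
                const int32_t *input_d, int32_t *output_d, int32_t *output_h,
                int32_t width, int32_t height) {
    const int n = width * height;
    const int threads = 1024;                            // assumed block size
    const int blocks  = (n + threads - 1) / threads;     // one thread per pixel

    kernel1<<<blocks, threads>>>(filter_d, dimension, input_d, output_d, width, height);

    // Host-side reduction for clarity; a device-side min/max reduction would avoid this copy.
    cudaMemcpy(output_h, output_d, n * sizeof(int32_t), cudaMemcpyDeviceToHost);
    int32_t smallest = output_h[0], biggest = output_h[0];
    for (int i = 1; i < n; i++) {
        if (output_h[i] < smallest) smallest = output_h[i];
        if (output_h[i] > biggest)  biggest  = output_h[i];
    }

    normalize1<<<blocks, threads>>>(output_d, width, height, smallest, biggest);
    cudaDeviceSynchronize();
}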
d87f37223f7d7e9f23f9d978980c66ec43f8a403.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ////////////////////////////////////////////////////////////////////////////////////////// // miniCFD // Author: Omitted ////////////////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <math.h> #include <stdio.h> #include <ctime> #include <iostream> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define DEBUG const double pi = 3.14159265358979323846264338327; //Pi const double grav = 9.8; //Gravitational acceleration (m / s^2) const double cp = 1004.; //Specific heat of dry air at constant pressure const double cv = 717.; //Specific heat of dry air at constant volume const double rd = 287.; //Dry air constant for equation of state (P=rho*rd*T) const double p0 = 1.e5; //Standard pressure at the surface in Pascals const double C0 = 27.5629410929725927310572984382; //Constant to translate potential temperature into pressure (P=C0*(rho*theta)**gamma) const double gamm = 1.40027894002789401278940017893; //gamma=cp/Rd , have to call this gamm because "gamma" is taken (I hate C so much) //Define domain and stability-related constants const double xlen = 2.e4; //Length of the domain in the x-direction (meters) const double zlen = 1.e4; //Length of the domain in the z-direction (meters) const double hv = 0.25; //How strong to diffuse the solution: hv \in [0:1] const double cfl = 1.50; //"Courant, Friedrichs, Lewy" number (for numerical stability) const double max_speed = 450; //Assumed maximum wave speed during the simulation (speed of sound + speed of wind) (meter / sec) const int hs = 2; //"Halo" size: number of cells beyond the MPI tasks's domain needed for a full "stencil" of information for reconstruction const int cfd_size = 4; //Size of the stencil used for interpolation //Parameters for indexing and flags const int NUM_VARS = 4; //Number of fluid state variables const int POS_DENS = 0; //index for density ("rho") const int POS_UMOM = 1; //index for momentum in the x-direction ("rho * u") const int POS_WMOM = 2; //index for momentum in the z-direction ("rho * w") const int POS_RHOT = 3; //index for density * potential temperature ("rho * theta") const int DIR_X = 1; //Integer constant to express that this operation is in the x-direction const int DIR_Z = 2; //Integer constant to express that this operation is in the z-direction enum test_cases {CONFIG_IN_TEST1, CONFIG_IN_TEST2, CONFIG_IN_TEST3, CONFIG_IN_TEST4, CONFIG_IN_TEST5, CONFIG_IN_TEST6 }; const int nqpoints = 3; double qpoints [] = { 0.112701665379258311482074460012E0 , 0.510000000000000000000000000000E0 , 0.887298334621741688517926529880E0 }; double qweights[] = { 0.277777777777777777777777777778E0 , 0.444444444444444444444444444445E0 , 0.277777777777777777777777777786E0 }; /////////////////////////////////////////////////////////////////////////////////////// // Variables that are initialized but remain static over the coure of the simulation /////////////////////////////////////////////////////////////////////////////////////// const double sim_time = _SIM_TIME; //total simulation time in seconds const double output_freq = _OUT_FREQ; //frequency to perform output in seconds double dt; //Model time step (seconds) const int nx_cfd = _NX, nz_cfd = _NZ; //Number 
of total grid cells in the x- and z- dimensions const int nnx = nx_cfd, nnz = nz_cfd; //Number of local grid cells in the x- and z- dimensions for this MPI task const double dx = xlen / nx_cfd, dz = zlen / nz_cfd;; //Grid space length in x- and z-dimension (meters) const int i_beg = 0, k_beg = 0; //beginning index in the x- and z-directions for this MPI task const int nranks = 1, myrank = 0; //Number of MPI ranks and my rank id const int masterproc = (myrank == 0); //Am I the master process (rank == 0)? const int config_spec = _IN_CONFIG; //Which data initialization to use double *cfd_dens_cell_cpu; //density (vert cell avgs). Dimensions: (1-hs:nnz+hs) double *cfd_dens_cell_gpu; double *cfd_dens_theta_cell_cpu; //rho*t (vert cell avgs). Dimensions: (1-hs:nnz+hs) double *cfd_dens_theta_cell_gpu; double *cfd_dens_int_cpu; //density (vert cell interf). Dimensions: (1:nnz+1) double *cfd_dens_int_gpu; double *cfd_dens_theta_int_cpu; //rho*t (vert cell interf). Dimensions: (1:nnz+1) double *cfd_dens_theta_int_gpu; double *cfd_pressure_int_cpu; //press (vert cell interf). Dimensions: (1:nnz+1) double *cfd_pressure_int_gpu; /////////////////////////////////////////////////////////////////////////////////////// // Variables that are dynamics over the course of the simulation /////////////////////////////////////////////////////////////////////////////////////// double etime; //Elapsed model time double output_counter; //Helps determine when it's time to do output //Runtime variable arrays double *state_cpu; //Fluid state. Dimensions: (1-hs:nnx+hs,1-hs:nnz+hs,NUM_VARS) double *state_gpu; double *state_tmp_cpu; //Fluid state. Dimensions: (1-hs:nnx+hs,1-hs:nnz+hs,NUM_VARS) double *state_tmp_gpu; double *flux_cpu; //Cell interface fluxes. Dimensions: (nnx+1,nnz+1,NUM_VARS) double *flux_gpu; double *tend_cpu; //Fluid state tendencies. Dimensions: (nnx,nnz,NUM_VARS) double *tend_gpu; int num_out = 0; //The number of outputs performed so far int direction_switch = 1; double mass0, te0; //Initial domain totals for mass and total energy double mass , te ; //Domain totals for mass and total energy //How is this not in the standard?! 
double dmin( double a , double b ) { if (a<b) {return a;} else {return b;} }; //Declaring the functions defined after "main" void initialize ( int *argc , char ***argv ); void finalize ( ); void testcase6 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase5 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase4 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase3 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase2 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase1 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void const_theta ( double z , double &r , double &t ); void const_bvfreq ( double z , double bv_freq0 , double &r , double &t ); double sample_cosine( double x , double z , double amp , double x0 , double z0 , double xrad , double zrad ); void output ( double *state , double etime ); void do_timestep ( double *state , double *state_tmp , double *flux , double *tend , double dt ); void do_semi_step ( double *state_init , double *state_forcing , double *state_out , double dt , int dir , double *flux , double *tend ); void do_dir_x ( double *state , double *flux , double *tend ); void do_dir_z ( double *state , double *flux , double *tend ); void exchange_border_x ( double *state ); void exchange_border_z ( double *state ); void do_results ( double &mass , double &te ); const int blockSize = 512; __global__ void do_semi_step_add(double *state_out, double *state_init, double *tend, int n, double dt){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % nnz; int i = id % nnx; int inds = (k+hs)*(nnx+2*hs) + ll*(nnz+2*hs)*(nnx+2*hs) + i+hs; int indt = ll*nnz*nnx + k*nnx + i; state_out[inds] = state_init[inds] + dt * tend[indt]; } } __global__ void do_dir_x_flux(double *state, double *flux, double *tend, double *cfd_dens_cell, double *cfd_dens_theta_cell, int n, double v_coef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / (nnx + 1); int i = id % (nnx + 1); double vals[NUM_VARS], d_vals[NUM_VARS]; for (int ll=0; ll<NUM_VARS; ll++) { double stencil[4]; for (int s=0; s < cfd_size; s++) { int inds = ll*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+s; stencil[s] = state[inds]; } //Fourth-order-accurate interpolation of the state vals[ll] = -stencil[0]/12 + 7*stencil[1]/12 + 7*stencil[2]/12 - stencil[3]/12; //First-order-accurate interpolation of the third spatial derivative of the state (for artificial viscosity) d_vals[ll] = -stencil[0] + 3*stencil[1] - 3*stencil[2] + stencil[3]; } double r = vals[POS_DENS] + cfd_dens_cell[k+hs]; double u = vals[POS_UMOM] / r; double w = vals[POS_WMOM] / r; double t = ( cfd_dens_theta_cell[k+hs] + vals[POS_RHOT] ) / r; double p = pow((r*t),gamm)*C0; flux[POS_DENS*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u - v_coef*d_vals[POS_DENS]; flux[POS_UMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*u+p - v_coef*d_vals[POS_UMOM]; flux[POS_WMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*w - v_coef*d_vals[POS_WMOM]; flux[POS_RHOT*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*t - v_coef*d_vals[POS_RHOT]; } } __global__ void do_dir_x_add(double *tend, double *flux, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % 
nnz; int i = id % nnx; int indt = ll* nnz * nnx + k* nnx + i ; int indf1 = ll*(nnz+1)*(nnx+1) + k*(nnx+1) + i ; int indf2 = ll*(nnz+1)*(nnx+1) + k*(nnx+1) + i+1; tend[indt] = -( flux[indf2] - flux[indf1] ) / dx; } } __global__ void do_dir_z_flux(double *state , double *flux, double *tend, double *cfd_dens_int, double *cfd_dens_theta_int, double *cfd_pressure_int, int n, double v_coef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / nnx; int i = id % nnx; //Use fourth-order interpolation from four cell averages to compute the value at the interface in question double stencil[4], d_vals[NUM_VARS], vals[NUM_VARS]; for (int ll=0; ll<NUM_VARS; ll++) { for (int s=0; s<cfd_size; s++) { int inds = ll*(nnz+2*hs)*(nnx+2*hs) + (k+s)*(nnx+2*hs) + i+hs; stencil[s] = state[inds]; } //Fourth-order-accurate interpolation of the state vals[ll] = -stencil[0]/12 + 7*stencil[1]/12 + 7*stencil[2]/12 - stencil[3]/12; //First-order-accurate interpolation of the third spatial derivative of the state d_vals[ll] = -stencil[0] + 3*stencil[1] - 3*stencil[2] + stencil[3]; } //Compute density, u-wind, w-wind, potential temperature, and pressure (r,u,w,t,p respectively) double r = vals[POS_DENS] + cfd_dens_int[k]; double u = vals[POS_UMOM] / r; double w = vals[POS_WMOM] / r; double t = ( vals[POS_RHOT] + cfd_dens_theta_int[k] ) / r; double p = C0*pow((r*t),gamm) - cfd_pressure_int[k]; //Enforce vertical boundary condition and exact mass conservation if (k == 0 || k == nnz) { w = 0; d_vals[POS_DENS] = 0; } //Compute the flux vector with viscosity flux[POS_DENS*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w - v_coef*d_vals[POS_DENS]; flux[POS_UMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*u - v_coef*d_vals[POS_UMOM]; flux[POS_WMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*w+p - v_coef*d_vals[POS_WMOM]; flux[POS_RHOT*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*t - v_coef*d_vals[POS_RHOT]; } } __global__ void do_dir_z_add(double *state, double *tend, double *flux, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % nnz; int i = id % nnx; int indt = ll* nnz * nnx + k* nnx + i ; int indf1 = ll*(nnz+1)*(nnx+1) + (k )*(nnx+1) + i; int indf2 = ll*(nnz+1)*(nnx+1) + (k+1)*(nnx+1) + i; tend[indt] = -( flux[indf2] - flux[indf1] ) / dz; if (ll == POS_WMOM) { int inds = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; tend[indt] = tend[indt] - state[inds]*grav; } } } __global__ void exchange_border_x_1(double *state, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll = id / nnz; int k = id % nnz; int pos = ll*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs); state[pos + 0 ] = state[pos + nnx+hs-2]; state[pos + 1 ] = state[pos + nnx+hs-1]; state[pos + nnx+hs ] = state[pos + hs ]; state[pos + nnx+hs+1] = state[pos + hs+1 ]; } } __global__ void exchange_border_x_2(double *state, double *cfd_dens_cell, double *cfd_dens_theta_cell, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / hs; int i = id % hs; double z = (k_beg + k+0.5)*dz; if (fabs(z-3*zlen/4) <= zlen/16) { int ind_r = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; int ind_u = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; int ind_t = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; state[ind_u] = (state[ind_r]+cfd_dens_cell[k+hs]) * 50.; state[ind_t] = (state[ind_r]+cfd_dens_cell[k+hs]) * 298. 
- cfd_dens_theta_cell[k+hs]; } } } __global__ void exchange_border_z_1(double *state, int n, int mnt_width){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll = id / (nnx+2*hs); int i = id % (nnx+2*hs); int pos = ll*(nnz+2*hs)*(nnx+2*hs); if (ll == POS_WMOM) { state[pos + (0 )*(nnx+2*hs) + i] = 0.; state[pos + (1 )*(nnx+2*hs) + i] = 0.; state[pos + (nnz+hs )*(nnx+2*hs) + i] = 0.; state[pos + (nnz+hs+1)*(nnx+2*hs) + i] = 0.; //Impose the vertical momentum effects of an artificial cos^2 mountain at the lower boundary if (config_spec == CONFIG_IN_TEST3) { double x = (i_beg+i-hs+0.5)*dx; if ( fabs(x-xlen/4) < mnt_width ) { double xloc = (x-(xlen/4)) / mnt_width; //Compute the derivative of the fake mountain double mnt_deriv = -pi*cos(pi*xloc/2)*sin(pi*xloc/2)*10/dx; //w = (dz/dx)*u state[POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (0)*(nnx+2*hs) + i] = mnt_deriv*state[POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + hs*(nnx+2*hs) + i]; state[POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (1)*(nnx+2*hs) + i] = mnt_deriv*state[POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + hs*(nnx+2*hs) + i]; } } } else { state[pos + (0 )*(nnx+2*hs) + i] = state[pos + (hs )*(nnx+2*hs) + i]; state[pos + (1 )*(nnx+2*hs) + i] = state[pos + (hs )*(nnx+2*hs) + i]; state[pos + (nnz+hs )*(nnx+2*hs) + i] = state[pos + (nnz+hs-1)*(nnx+2*hs) + i]; state[pos + (nnz+hs+1)*(nnx+2*hs) + i] = state[pos + (nnz+hs-1)*(nnx+2*hs) + i]; } } } //Performs a single dimensionally split time step using a simple low-storate three-stage Runge-Kutta time integrator //The dimensional splitting is a second-order-accurate alternating Strang splitting in which the //order of directions is alternated each time step. //The Runge-Kutta method used here is defined as follows: // q* = q[n] + dt/3 * rhs(q[n]) // q** = q[n] + dt/2 * rhs(q* ) // q[n+1] = q[n] + dt/1 * rhs(q** ) void do_timestep( double *state , double *state_tmp , double *flux , double *tend , double dt ) { if (direction_switch) { //x-direction first do_semi_step( state , state , state_tmp , dt / 3 , DIR_X , flux , tend ); do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_X , flux , tend ); do_semi_step( state , state_tmp , state , dt / 1 , DIR_X , flux , tend ); //z-direction second do_semi_step( state , state , state_tmp , dt / 3 , DIR_Z , flux , tend ); do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_Z , flux , tend ); do_semi_step( state , state_tmp , state , dt / 1 , DIR_Z , flux , tend ); } else { //z-direction second do_semi_step( state , state , state_tmp , dt / 3 , DIR_Z , flux , tend ); do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_Z , flux , tend ); do_semi_step( state , state_tmp , state , dt / 1 , DIR_Z , flux , tend ); //x-direction first do_semi_step( state , state , state_tmp , dt / 3 , DIR_X , flux , tend ); do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_X , flux , tend ); do_semi_step( state , state_tmp , state , dt / 1 , DIR_X , flux , tend ); } if (direction_switch) { direction_switch = 0; } else { direction_switch = 1; } } //Perform a single semi-discretized step in time with the form: //state_out = state_init + dt * rhs(state_forcing) //Meaning the step starts from state_init, computes the rhs using state_forcing, and stores the result in state_out void do_semi_step( double *state_init , double *state_forcing , double *state_out , double dt , int dir , double *flux , double *tend ) { if(dir == DIR_X) { //Set the halo values for this MPI task's fluid state in the x-direction exchange_border_x(state_forcing); //Compute the time tendencies for the fluid state in 
the x-direction do_dir_x(state_forcing,flux,tend); } else if (dir == DIR_Z) { //Set the halo values for this MPI task's fluid state in the z-direction exchange_border_z(state_forcing); //Compute the time tendencies for the fluid state in the z-direction do_dir_z(state_forcing,flux,tend); } int n = NUM_VARS * nnz * nnx; int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( do_semi_step_add), dim3(gridSize), dim3(blockSize), 0, 0, state_out, state_init, tend, n, dt); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif } //Compute the time tendencies of the fluid state using forcing in the x-direction //Since the halos are set in a separate routine, this will not require MPI //First, compute the flux vector at each cell interface in the x-direction (including viscosity) //Then, compute the tendencies using those fluxes void do_dir_x( double *state , double *flux , double *tend ) { //Compute the hyperviscosity coeficient const double v_coef = -hv * dx / (16*dt); int n = (nnz) * (nnx + 1); int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( do_dir_x_flux), dim3(gridSize), dim3(blockSize), 0, 0, state, flux, tend, cfd_dens_cell_gpu, cfd_dens_theta_cell_gpu, n, v_coef); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif n = NUM_VARS * nnz * nnx; gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( do_dir_x_add), dim3(gridSize), dim3(blockSize), 0, 0, tend, flux, n); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif } //Compute the time tendencies of the fluid state using forcing in the z-direction //Since the halos are set in a separate routine, this will not require MPI //First, compute the flux vector at each cell interface in the z-direction (including viscosity) //Then, compute the tendencies using those fluxes void do_dir_z( double *state , double *flux , double *tend ) { //Compute the viscosity coeficient const double v_coef = -hv * dz / (16 * dt); int n = (nnz + 1) * nnx; int gridSize = (n + blockSize - 1) / blockSize;; hipLaunchKernelGGL(( do_dir_z_flux), dim3(gridSize), dim3(blockSize), 0, 0, state, flux, tend, cfd_dens_int_gpu, cfd_dens_theta_int_gpu, cfd_pressure_int_gpu, n, v_coef); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif n = NUM_VARS * nnz * nnx; gridSize = (n + blockSize - 1) / blockSize;; hipLaunchKernelGGL(( do_dir_z_add), dim3(gridSize), dim3(blockSize), 0, 0, state, tend, flux, n); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif } // CUDA kernel. __global__ void copyStatesX(double *d, int n, int nnx_, int nnz_) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n){ int ll = id / nnz_; int k = id % nnz_; int pos = ll*(nnz_+4)*(nnx_+4) + (k+hs)*(nnx_+4); d[pos ] = d[pos + nnx_]; d[pos + 1 ] = d[pos + nnx_+1]; d[pos + nnx_+2 ] = d[pos + 2 ]; d[pos + nnx_+3] = d[pos + 3 ]; } } //Set this MPI task's halo values in the x-direction. 
This routine will require MPI void exchange_border_x( double *state ) { int n = NUM_VARS * nnz; int gridSize = (n + blockSize - 1)/blockSize; hipLaunchKernelGGL(( exchange_border_x_1), dim3(gridSize), dim3(blockSize), 0, 0, state, n); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif if (config_spec == CONFIG_IN_TEST6) { if (myrank == 0) { n = nnz * hs; gridSize = (n + blockSize - 1)/blockSize; hipLaunchKernelGGL(( exchange_border_x_2), dim3(gridSize), dim3(blockSize), 0, 0, state, cfd_dens_cell_gpu, cfd_dens_theta_cell_gpu, n); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif } } } //Set this MPI task's halo values in the z-direction. This does not require MPI because there is no MPI //decomposition in the vertical direction void exchange_border_z( double *state ) { const double mnt_width = xlen/8; int n = NUM_VARS * (nnx+2*hs); int gridSize = (n + blockSize - 1)/blockSize; hipLaunchKernelGGL(( exchange_border_z_1), dim3(gridSize), dim3(blockSize), 0, 0, state, n, mnt_width); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif } void initialize( int *argc , char ***argv ) { int i, k, ii, kk, ll, inds; double x, z, r, u, w, t, hr, ht; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // YOU DON'T NEED TO ALTER ANYTHING BELOW THIS POINT IN THE CODE //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //Allocate the model data state_cpu = (double *) malloc( (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); state_tmp_cpu = (double *) malloc( (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); flux_cpu = (double *) malloc( (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double) ); tend_cpu = (double *) malloc( nnx*nnz*NUM_VARS*sizeof(double) ); cfd_dens_cell_cpu = (double *) malloc( (nnz+2*hs)*sizeof(double) ); cfd_dens_theta_cell_cpu = (double *) malloc( (nnz+2*hs)*sizeof(double) ); cfd_dens_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); cfd_dens_theta_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); cfd_pressure_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); //Allocate GPU memory hipMalloc(&state_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); hipMalloc(&state_tmp_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); hipMalloc(&flux_gpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double) ); hipMalloc(&tend_gpu, nnx*nnz*NUM_VARS*sizeof(double) ); hipMalloc(&cfd_dens_cell_gpu, (nnz+2*hs)*sizeof(double) ); hipMalloc(&cfd_dens_theta_cell_gpu, (nnz+2*hs)*sizeof(double) ); hipMalloc(&cfd_dens_int_gpu, (nnz+1)*sizeof(double) ); hipMalloc(&cfd_dens_theta_int_gpu, (nnz+1)*sizeof(double) ); hipMalloc(&cfd_pressure_int_gpu, (nnz+1)*sizeof(double) ); //Define the maximum stable time step based on an assumed maximum wind speed dt = dmin(dx,dz) / max_speed * cfl; //Set initial elapsed model time etime = 0.; //If I'm the master process in MPI, display some grid information if (masterproc) { printf( "nx_cfd, nz_cfd: %d %d\n", nx_cfd, nz_cfd); printf( "dx,dz: %lf %lf\n",dx,dz); printf( "dt: %lf\n",dt); } ////////////////////////////////////////////////////////////////////////// // Initialize the cell-averaged fluid state via Gauss-Legendre quadrature ////////////////////////////////////////////////////////////////////////// for (k=0; k<nnz+2*hs; k++) { for (i=0; 
i<nnx+2*hs; i++) { //Initialize the state to zero for (ll=0; ll<NUM_VARS; ll++) { inds = ll*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = 0.; } //Use Gauss-Legendre quadrature to initialize a balance + temperature perturbation for (kk=0; kk<nqpoints; kk++) { for (ii=0; ii<nqpoints; ii++) { //Compute the x,z location within the global domain based on cell and quadrature index x = (i_beg + i-hs+0.5)*dx + (qpoints[ii]-0.5)*dx; z = (k_beg + k-hs+0.5)*dz + (qpoints[kk]-0.5)*dz; //Set the fluid state based on the user's specification switch(config_spec){ case CONFIG_IN_TEST1: testcase1(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST2: testcase2(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST3: testcase3(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST4: testcase4(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST5: testcase5(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST6: testcase6(x,z,r,u,w,t,hr,ht); break; } //Store into the fluid state array inds = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + r * qweights[ii]*qweights[kk]; inds = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + (r+hr)*u * qweights[ii]*qweights[kk]; inds = POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + (r+hr)*w * qweights[ii]*qweights[kk]; inds = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + ( (r+hr)*(t+ht) - hr*ht ) * qweights[ii]*qweights[kk]; } } for (ll=0; ll<NUM_VARS; ll++) { inds = ll*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_tmp_cpu[inds] = state_cpu[inds]; } } } //Compute the background state over vertical cell averages for (k=0; k<nnz+2*hs; k++) { cfd_dens_cell_cpu [k] = 0.; cfd_dens_theta_cell_cpu[k] = 0.; for (kk=0; kk<nqpoints; kk++) { z = (k_beg + k-hs+0.5)*dz; //Set the fluid state based on the user's specification if (config_spec == CONFIG_IN_TEST1 ) { testcase1 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST2 ) { testcase2 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST3 ) { testcase3 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST4 ) { testcase4 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST5) { testcase5(0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST6 ) { testcase6 (0.,z,r,u,w,t,hr,ht); } cfd_dens_cell_cpu [k] = cfd_dens_cell_cpu [k] + hr * qweights[kk]; cfd_dens_theta_cell_cpu[k] = cfd_dens_theta_cell_cpu[k] + hr*ht * qweights[kk]; } } //Compute the background state at vertical cell interfaces for (k=0; k<nnz+1; k++) { z = (k_beg + k)*dz; if (config_spec == CONFIG_IN_TEST1 ) { testcase1 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST2 ) { testcase2 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST3 ) { testcase3 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST4 ) { testcase4 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST5) { testcase5(0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST6 ) { testcase6 (0.,z,r,u,w,t,hr,ht); } cfd_dens_int_cpu [k] = hr; cfd_dens_theta_int_cpu[k] = hr*ht; cfd_pressure_int_cpu [k] = C0*pow((hr*ht),gamm); } } //This test case is initially balanced but injects fast, cold air from the left boundary near the model top //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase6( double x , double z , double &r , double &u , double &w , double &t 
, double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; } //Initialize a density current (falling cold thermal that propagates along the model bottom) //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase5( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z,-20. ,xlen/2,5000.,4000.,2000.); } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase4( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; // call random_number(u); // call random_number(w); // u = (u-0.5)*20; // w = (w-0.5)*20; } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase3( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_bvfreq(z,0.02,hr,ht); r = 0.; t = 0.; u = 15.; w = 0.; } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase2( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z, 3. ,xlen/2,2000.,2000.,2000.); } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase1( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z, 20.,xlen/2,2000.,2000.,2000.); t = t + sample_cosine(x,z,-20.,xlen/2,8000.,2000.,2000.); } //Establish hydrstatic balance using constant potential temperature (thermally neutral atmosphere) //z is the input coordinate //r and t are the output background density and potential temperature void const_theta( double z , double &r , double &t ) { const double theta0 = 300.; //Background potential temperature const double exner0 = 1.; //Surface-level Exner pressure double p,exner,rt; //Establish balance first using Exner pressure t = theta0; //Potential Temperature at z exner = exner0 - grav * z / (cp * theta0); //Exner pressure at z p = p0 * pow(exner,(cp/rd)); //Pressure at z rt = pow((p / C0),(1. 
/ gamm)); //rho*theta at z r = rt / t; //Density at z } //Establish hydrstatic balance using constant Brunt-Vaisala frequency //z is the input coordinate //bv_freq0 is the constant Brunt-Vaisala frequency //r and t are the output background density and potential temperature void const_bvfreq( double z , double bv_freq0 , double &r , double &t ) { const double theta0 = 300.; //Background potential temperature const double exner0 = 1.; //Surface-level Exner pressure double p, exner, rt; t = theta0 * exp( bv_freq0*bv_freq0 / grav * z ); //Pot temp at z exner = exner0 - grav*grav / (cp * bv_freq0*bv_freq0) * (t - theta0) / (t * theta0); //Exner pressure at z p = p0 * pow(exner,(cp/rd)); //Pressure at z rt = pow((p / C0),(1. / gamm)); //rho*theta at z r = rt / t; //Density at z } //Sample from an ellipse of a specified center, radius, and amplitude at a specified location //x and z are input coordinates //amp,x0,z0,xrad,zrad are input amplitude, center, and radius of the ellipse double sample_cosine( double x , double z , double amp , double x0 , double z0 , double xrad , double zrad ) { double dist; //Compute distance from bubble center dist = sqrt( ((x-x0)/xrad)*((x-x0)/xrad) + ((z-z0)/zrad)*((z-z0)/zrad) ) * pi / 2.; //If the distance from bubble center is less than the radius, create a cos**2 profile if (dist <= pi / 2.) { return amp * pow(cos(dist),2.); } else { return 0.; } } void finalize() { free( state_cpu ); free( state_tmp_cpu ); free( flux_cpu ); free( tend_cpu ); free( cfd_dens_cell_cpu ); free( cfd_dens_theta_cell_cpu ); free( cfd_dens_int_cpu ); free( cfd_dens_theta_int_cpu ); free( cfd_pressure_int_cpu ); hipFree( state_gpu ); hipFree( state_tmp_gpu ); hipFree( flux_gpu ); hipFree( tend_gpu ); hipFree( cfd_dens_cell_gpu ); hipFree( cfd_dens_theta_cell_gpu ); hipFree( cfd_dens_int_gpu ); hipFree( cfd_dens_theta_int_gpu ); hipFree( cfd_pressure_int_gpu ); } //Compute reduced quantities for error checking without resorting void do_results( double &mass , double &te ) { mass = 0; te = 0; for (int k=0; k<nnz; k++) { for (int i=0; i<nnx; i++) { int ind_r = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_u = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_w = POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_t = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; double r = state_cpu[ind_r] + cfd_dens_cell_cpu[hs+k]; // Density double u = state_cpu[ind_u] / r; // U-wind double w = state_cpu[ind_w] / r; // W-wind double th = ( state_cpu[ind_t] + cfd_dens_theta_cell_cpu[hs+k] ) / r; // Potential Temperature (theta) double p = C0*pow(r*th,gamm); // Pressure double t = th / pow(p0/p,rd/cp); // Temperature double ke = r*(u*u+w*w); // Kinetic Energy double ie = r*cv*t; // Internal Energy mass += r *dx*dz; // Accumulate domain mass te += (ke + ie)*dx*dz; // Accumulate domain total energy } } } void copy_to_gpu(){ hipMemcpy(state_gpu, state_cpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(state_tmp_gpu, state_tmp_cpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(flux_gpu, flux_cpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(tend_gpu, tend_cpu, nnx*nnz*NUM_VARS*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cfd_dens_cell_gpu, cfd_dens_cell_cpu, (nnz+2*hs)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cfd_dens_theta_cell_gpu, cfd_dens_theta_cell_cpu, (nnz+2*hs)*sizeof(double), hipMemcpyHostToDevice); 
hipMemcpy(cfd_dens_int_gpu, cfd_dens_int_cpu, (nnz+1)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cfd_dens_theta_int_gpu, cfd_dens_theta_int_cpu, (nnz+1)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cfd_pressure_int_gpu, cfd_pressure_int_cpu, (nnz+1)*sizeof(double), hipMemcpyHostToDevice); } void copy_to_cpu(){ hipMemcpy(state_cpu, state_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(state_tmp_cpu, state_tmp_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(flux_cpu, flux_gpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(tend_cpu, tend_gpu, nnx*nnz*NUM_VARS*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(cfd_dens_cell_cpu, cfd_dens_cell_gpu, (nnz+2*hs)*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(cfd_dens_theta_cell_cpu, cfd_dens_theta_cell_gpu, (nnz+2*hs)*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(cfd_dens_int_cpu, cfd_dens_int_gpu, (nnz+1)*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(cfd_dens_theta_int_cpu, cfd_dens_theta_int_gpu, (nnz+1)*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(cfd_pressure_int_cpu, cfd_pressure_int_gpu, (nnz+1)*sizeof(double), hipMemcpyDeviceToHost); } void print(double *v, int n){ for(int i = 0; i < n; i++) if(v[i] != 0.0) printf("%d: %lf\n", i, v[i]); printf("\n"); } /////////////////////////////////////////////////////////////////////////////////////// // THE MAIN PROGRAM STARTS HERE /////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { initialize( &argc , &argv ); //hipDeviceSynchronize(); //Initial reductions for mass, kinetic energy, and total energy do_results(mass0,te0); //Copying data to GPU copy_to_gpu(); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif //////////////////////////////////////////////////// // MAIN TIME STEP LOOP //////////////////////////////////////////////////// auto c_start = std::clock(); while (etime < sim_time) { //If the time step leads to exceeding the simulation time, shorten it for the last step if (etime + dt > sim_time) { dt = sim_time - etime; } //Perform a single time step do_timestep(state_gpu,state_tmp_gpu,flux_gpu,tend_gpu,dt); //hipDeviceSynchronize(); //Update the elapsed time and output counter etime = etime + dt; output_counter = output_counter + dt; //If it's time for output, reset the counter, and do output if (output_counter >= output_freq) { output_counter = output_counter - output_freq; //Inform the user if (masterproc) { printf( "Elapsed Time: %lf / %lf\n", etime , sim_time ); } } } auto c_end = std::clock(); if (masterproc) { std::cout << "CPU Time: " << ( (double) (c_end-c_start) ) / CLOCKS_PER_SEC << " sec\n"; } copy_to_cpu(); #ifdef DEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif //Final reductions for mass, kinetic energy, and total energy do_results(mass,te); if (masterproc) { printf( "d_mass: %le\n" , (mass - mass0)/mass0); printf( "d_te: %le\n" , (te - te0 )/te0 ); } finalize(); }
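Every launch in the miniCFD listing follows the same recipe: compute the element count n, derive gridSize by ceiling division with blockSize = 512, launch, and under DEBUG verify the launch through the gpuErrchk macro with a peek-at-last-error followed by a device synchronize. The sketch below isolates that recipe into one helper around an illustrative element-wise kernel; axpy_step stands in for do_semi_step_add (without the halo indexing) and is not part of the source, while the macro is the one the listing defines.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Illustrative stand-in for do_semi_step_add: state_out = state_init + dt * tend, element-wise.
__global__ void axpy_step(double *state_out, const double *state_init,
                          const double *tend, int n, double dt) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) state_out[id] = state_init[id] + dt * tend[id];
}

// The launch pattern repeated throughout the listing, gathered in one place.
void launch_step(double *state_out, const double *state_init,
                 const double *tend, int n, double dt) {
    const int blockSize = 512;                              // same block size as the listing
    const int gridSize  = (n + blockSize - 1) / blockSize;  // ceiling division
    axpy_step<<<gridSize, blockSize>>>(state_out, state_init, tend, n, dt);
#ifdef DEBUG
    gpuErrchk(cudaPeekAtLastError());     // reports invalid launch configurations immediately
    gpuErrchk(cudaDeviceSynchronize());   // surfaces errors raised while the kernel runs
#endif
}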
d87f37223f7d7e9f23f9d978980c66ec43f8a403.cu
////////////////////////////////////////////////////////////////////////////////////////// // miniCFD // Author: Omitted ////////////////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <math.h> #include <stdio.h> #include <ctime> #include <iostream> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define DEBUG const double pi = 3.14159265358979323846264338327; //Pi const double grav = 9.8; //Gravitational acceleration (m / s^2) const double cp = 1004.; //Specific heat of dry air at constant pressure const double cv = 717.; //Specific heat of dry air at constant volume const double rd = 287.; //Dry air constant for equation of state (P=rho*rd*T) const double p0 = 1.e5; //Standard pressure at the surface in Pascals const double C0 = 27.5629410929725927310572984382; //Constant to translate potential temperature into pressure (P=C0*(rho*theta)**gamma) const double gamm = 1.40027894002789401278940017893; //gamma=cp/Rd , have to call this gamm because "gamma" is taken (I hate C so much) //Define domain and stability-related constants const double xlen = 2.e4; //Length of the domain in the x-direction (meters) const double zlen = 1.e4; //Length of the domain in the z-direction (meters) const double hv = 0.25; //How strong to diffuse the solution: hv \in [0:1] const double cfl = 1.50; //"Courant, Friedrichs, Lewy" number (for numerical stability) const double max_speed = 450; //Assumed maximum wave speed during the simulation (speed of sound + speed of wind) (meter / sec) const int hs = 2; //"Halo" size: number of cells beyond the MPI tasks's domain needed for a full "stencil" of information for reconstruction const int cfd_size = 4; //Size of the stencil used for interpolation //Parameters for indexing and flags const int NUM_VARS = 4; //Number of fluid state variables const int POS_DENS = 0; //index for density ("rho") const int POS_UMOM = 1; //index for momentum in the x-direction ("rho * u") const int POS_WMOM = 2; //index for momentum in the z-direction ("rho * w") const int POS_RHOT = 3; //index for density * potential temperature ("rho * theta") const int DIR_X = 1; //Integer constant to express that this operation is in the x-direction const int DIR_Z = 2; //Integer constant to express that this operation is in the z-direction enum test_cases {CONFIG_IN_TEST1, CONFIG_IN_TEST2, CONFIG_IN_TEST3, CONFIG_IN_TEST4, CONFIG_IN_TEST5, CONFIG_IN_TEST6 }; const int nqpoints = 3; double qpoints [] = { 0.112701665379258311482074460012E0 , 0.510000000000000000000000000000E0 , 0.887298334621741688517926529880E0 }; double qweights[] = { 0.277777777777777777777777777778E0 , 0.444444444444444444444444444445E0 , 0.277777777777777777777777777786E0 }; /////////////////////////////////////////////////////////////////////////////////////// // Variables that are initialized but remain static over the coure of the simulation /////////////////////////////////////////////////////////////////////////////////////// const double sim_time = _SIM_TIME; //total simulation time in seconds const double output_freq = _OUT_FREQ; //frequency to perform output in seconds double dt; //Model time step (seconds) const int nx_cfd = _NX, nz_cfd = _NZ; //Number of total grid cells in the x- and z- dimensions const int nnx = nx_cfd, nnz = nz_cfd; 
const double dx = xlen / nx_cfd, dz = zlen / nz_cfd;   //Grid space length in x- and z-dimension (meters)
const int i_beg = 0, k_beg = 0;                        //beginning index in the x- and z-directions for this MPI task
const int nranks = 1, myrank = 0;                      //Number of MPI ranks and my rank id
const int masterproc = (myrank == 0);                  //Am I the master process (rank == 0)?
const int config_spec = _IN_CONFIG;                    //Which data initialization to use

double *cfd_dens_cell_cpu;         //density (vert cell avgs).   Dimensions: (1-hs:nnz+hs)
double *cfd_dens_cell_gpu;
double *cfd_dens_theta_cell_cpu;   //rho*t (vert cell avgs).     Dimensions: (1-hs:nnz+hs)
double *cfd_dens_theta_cell_gpu;
double *cfd_dens_int_cpu;          //density (vert cell interf). Dimensions: (1:nnz+1)
double *cfd_dens_int_gpu;
double *cfd_dens_theta_int_cpu;    //rho*t (vert cell interf).   Dimensions: (1:nnz+1)
double *cfd_dens_theta_int_gpu;
double *cfd_pressure_int_cpu;      //press (vert cell interf).   Dimensions: (1:nnz+1)
double *cfd_pressure_int_gpu;

///////////////////////////////////////////////////////////////////////////////////////
// Variables that are dynamic over the course of the simulation
///////////////////////////////////////////////////////////////////////////////////////
double etime;            //Elapsed model time
double output_counter;   //Helps determine when it's time to do output

//Runtime variable arrays
double *state_cpu;       //Fluid state.            Dimensions: (1-hs:nnx+hs,1-hs:nnz+hs,NUM_VARS)
double *state_gpu;
double *state_tmp_cpu;   //Fluid state.            Dimensions: (1-hs:nnx+hs,1-hs:nnz+hs,NUM_VARS)
double *state_tmp_gpu;
double *flux_cpu;        //Cell interface fluxes.  Dimensions: (nnx+1,nnz+1,NUM_VARS)
double *flux_gpu;
double *tend_cpu;        //Fluid state tendencies. Dimensions: (nnx,nnz,NUM_VARS)
double *tend_gpu;

int    num_out = 0;            //The number of outputs performed so far
int    direction_switch = 1;
double mass0, te0;             //Initial domain totals for mass and total energy
double mass , te ;             //Domain totals for mass and total energy

//How is a plain min() for doubles not in the standard?! (dmin below fills the gap)
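// [Editor's note] Illustrative sketch, not part of the original source: the kernels below all
// flatten the (variable, z, x) state array, whose logical dimensions are
// (NUM_VARS, nnz+2*hs, nnx+2*hs), into a single 1-D index. A hypothetical helper that makes
// that convention explicit, assuming the same halo width hs used throughout this file:
__host__ __device__ inline int state_index(int ll, int k, int i) {
  //ll selects the fluid variable; k and i are 0-based interior cell indices.
  //The +hs offsets skip the halo cells on each side of the domain.
  return ll*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + (i+hs);
}
// For example, state_gpu[state_index(POS_DENS,k,i)] would be the density perturbation of
// interior cell (k,i); the kernels below inline exactly this arithmetic.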
double dmin( double a , double b ) { if (a<b) {return a;} else {return b;} }; //Declaring the functions defined after "main" void initialize ( int *argc , char ***argv ); void finalize ( ); void testcase6 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase5 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase4 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase3 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase2 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void testcase1 ( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ); void const_theta ( double z , double &r , double &t ); void const_bvfreq ( double z , double bv_freq0 , double &r , double &t ); double sample_cosine( double x , double z , double amp , double x0 , double z0 , double xrad , double zrad ); void output ( double *state , double etime ); void do_timestep ( double *state , double *state_tmp , double *flux , double *tend , double dt ); void do_semi_step ( double *state_init , double *state_forcing , double *state_out , double dt , int dir , double *flux , double *tend ); void do_dir_x ( double *state , double *flux , double *tend ); void do_dir_z ( double *state , double *flux , double *tend ); void exchange_border_x ( double *state ); void exchange_border_z ( double *state ); void do_results ( double &mass , double &te ); const int blockSize = 512; __global__ void do_semi_step_add(double *state_out, double *state_init, double *tend, int n, double dt){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % nnz; int i = id % nnx; int inds = (k+hs)*(nnx+2*hs) + ll*(nnz+2*hs)*(nnx+2*hs) + i+hs; int indt = ll*nnz*nnx + k*nnx + i; state_out[inds] = state_init[inds] + dt * tend[indt]; } } __global__ void do_dir_x_flux(double *state, double *flux, double *tend, double *cfd_dens_cell, double *cfd_dens_theta_cell, int n, double v_coef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / (nnx + 1); int i = id % (nnx + 1); double vals[NUM_VARS], d_vals[NUM_VARS]; for (int ll=0; ll<NUM_VARS; ll++) { double stencil[4]; for (int s=0; s < cfd_size; s++) { int inds = ll*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+s; stencil[s] = state[inds]; } //Fourth-order-accurate interpolation of the state vals[ll] = -stencil[0]/12 + 7*stencil[1]/12 + 7*stencil[2]/12 - stencil[3]/12; //First-order-accurate interpolation of the third spatial derivative of the state (for artificial viscosity) d_vals[ll] = -stencil[0] + 3*stencil[1] - 3*stencil[2] + stencil[3]; } double r = vals[POS_DENS] + cfd_dens_cell[k+hs]; double u = vals[POS_UMOM] / r; double w = vals[POS_WMOM] / r; double t = ( cfd_dens_theta_cell[k+hs] + vals[POS_RHOT] ) / r; double p = pow((r*t),gamm)*C0; flux[POS_DENS*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u - v_coef*d_vals[POS_DENS]; flux[POS_UMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*u+p - v_coef*d_vals[POS_UMOM]; flux[POS_WMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*w - v_coef*d_vals[POS_WMOM]; flux[POS_RHOT*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*u*t - v_coef*d_vals[POS_RHOT]; } } __global__ void do_dir_x_add(double *tend, double *flux, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % 
nnz; int i = id % nnx; int indt = ll* nnz * nnx + k* nnx + i ; int indf1 = ll*(nnz+1)*(nnx+1) + k*(nnx+1) + i ; int indf2 = ll*(nnz+1)*(nnx+1) + k*(nnx+1) + i+1; tend[indt] = -( flux[indf2] - flux[indf1] ) / dx; } } __global__ void do_dir_z_flux(double *state , double *flux, double *tend, double *cfd_dens_int, double *cfd_dens_theta_int, double *cfd_pressure_int, int n, double v_coef){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / nnx; int i = id % nnx; //Use fourth-order interpolation from four cell averages to compute the value at the interface in question double stencil[4], d_vals[NUM_VARS], vals[NUM_VARS]; for (int ll=0; ll<NUM_VARS; ll++) { for (int s=0; s<cfd_size; s++) { int inds = ll*(nnz+2*hs)*(nnx+2*hs) + (k+s)*(nnx+2*hs) + i+hs; stencil[s] = state[inds]; } //Fourth-order-accurate interpolation of the state vals[ll] = -stencil[0]/12 + 7*stencil[1]/12 + 7*stencil[2]/12 - stencil[3]/12; //First-order-accurate interpolation of the third spatial derivative of the state d_vals[ll] = -stencil[0] + 3*stencil[1] - 3*stencil[2] + stencil[3]; } //Compute density, u-wind, w-wind, potential temperature, and pressure (r,u,w,t,p respectively) double r = vals[POS_DENS] + cfd_dens_int[k]; double u = vals[POS_UMOM] / r; double w = vals[POS_WMOM] / r; double t = ( vals[POS_RHOT] + cfd_dens_theta_int[k] ) / r; double p = C0*pow((r*t),gamm) - cfd_pressure_int[k]; //Enforce vertical boundary condition and exact mass conservation if (k == 0 || k == nnz) { w = 0; d_vals[POS_DENS] = 0; } //Compute the flux vector with viscosity flux[POS_DENS*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w - v_coef*d_vals[POS_DENS]; flux[POS_UMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*u - v_coef*d_vals[POS_UMOM]; flux[POS_WMOM*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*w+p - v_coef*d_vals[POS_WMOM]; flux[POS_RHOT*(nnz+1)*(nnx+1) + k*(nnx+1) + i] = r*w*t - v_coef*d_vals[POS_RHOT]; } } __global__ void do_dir_z_add(double *state, double *tend, double *flux, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll= id /(nnz * nnx); int k = (id / nnx) % nnz; int i = id % nnx; int indt = ll* nnz * nnx + k* nnx + i ; int indf1 = ll*(nnz+1)*(nnx+1) + (k )*(nnx+1) + i; int indf2 = ll*(nnz+1)*(nnx+1) + (k+1)*(nnx+1) + i; tend[indt] = -( flux[indf2] - flux[indf1] ) / dz; if (ll == POS_WMOM) { int inds = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; tend[indt] = tend[indt] - state[inds]*grav; } } } __global__ void exchange_border_x_1(double *state, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int ll = id / nnz; int k = id % nnz; int pos = ll*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs); state[pos + 0 ] = state[pos + nnx+hs-2]; state[pos + 1 ] = state[pos + nnx+hs-1]; state[pos + nnx+hs ] = state[pos + hs ]; state[pos + nnx+hs+1] = state[pos + hs+1 ]; } } __global__ void exchange_border_x_2(double *state, double *cfd_dens_cell, double *cfd_dens_theta_cell, int n){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < n){ int k = id / hs; int i = id % hs; double z = (k_beg + k+0.5)*dz; if (fabs(z-3*zlen/4) <= zlen/16) { int ind_r = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; int ind_u = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; int ind_t = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i; state[ind_u] = (state[ind_r]+cfd_dens_cell[k+hs]) * 50.; state[ind_t] = (state[ind_r]+cfd_dens_cell[k+hs]) * 298. 
                                                                      - cfd_dens_theta_cell[k+hs];
    }
  }
}

__global__ void exchange_border_z_1(double *state, int n, double mnt_width){
  int id = blockIdx.x*blockDim.x+threadIdx.x;
  if(id < n){
    int ll = id / (nnx+2*hs);
    int i  = id % (nnx+2*hs);
    int pos = ll*(nnz+2*hs)*(nnx+2*hs);
    if (ll == POS_WMOM) {
      state[pos + (0       )*(nnx+2*hs) + i] = 0.;
      state[pos + (1       )*(nnx+2*hs) + i] = 0.;
      state[pos + (nnz+hs  )*(nnx+2*hs) + i] = 0.;
      state[pos + (nnz+hs+1)*(nnx+2*hs) + i] = 0.;
      //Impose the vertical momentum effects of an artificial cos^2 mountain at the lower boundary
      if (config_spec == CONFIG_IN_TEST3) {
        double x = (i_beg+i-hs+0.5)*dx;
        if ( fabs(x-xlen/4) < mnt_width ) {
          double xloc = (x-(xlen/4)) / mnt_width;
          //Compute the derivative of the fake mountain
          double mnt_deriv = -pi*cos(pi*xloc/2)*sin(pi*xloc/2)*10/dx;
          //w = (dz/dx)*u
          state[POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (0)*(nnx+2*hs) + i] = mnt_deriv*state[POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + hs*(nnx+2*hs) + i];
          state[POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (1)*(nnx+2*hs) + i] = mnt_deriv*state[POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + hs*(nnx+2*hs) + i];
        }
      }
    } else {
      state[pos + (0       )*(nnx+2*hs) + i] = state[pos + (hs      )*(nnx+2*hs) + i];
      state[pos + (1       )*(nnx+2*hs) + i] = state[pos + (hs      )*(nnx+2*hs) + i];
      state[pos + (nnz+hs  )*(nnx+2*hs) + i] = state[pos + (nnz+hs-1)*(nnx+2*hs) + i];
      state[pos + (nnz+hs+1)*(nnx+2*hs) + i] = state[pos + (nnz+hs-1)*(nnx+2*hs) + i];
    }
  }
}

//Performs a single dimensionally split time step using a simple low-storage three-stage Runge-Kutta time integrator
//The dimensional splitting is a second-order-accurate alternating Strang splitting in which the
//order of directions is alternated each time step.
//The Runge-Kutta method used here is defined as follows:
// q*     = q[n] + dt/3 * rhs(q[n])
// q**    = q[n] + dt/2 * rhs(q*  )
// q[n+1] = q[n] + dt/1 * rhs(q** )
void do_timestep( double *state , double *state_tmp , double *flux , double *tend , double dt ) {
  if (direction_switch) {
    //x-direction first
    do_semi_step( state , state     , state_tmp , dt / 3 , DIR_X , flux , tend );
    do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_X , flux , tend );
    do_semi_step( state , state_tmp , state     , dt / 1 , DIR_X , flux , tend );
    //z-direction second
    do_semi_step( state , state     , state_tmp , dt / 3 , DIR_Z , flux , tend );
    do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_Z , flux , tend );
    do_semi_step( state , state_tmp , state     , dt / 1 , DIR_Z , flux , tend );
  } else {
    //z-direction first
    do_semi_step( state , state     , state_tmp , dt / 3 , DIR_Z , flux , tend );
    do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_Z , flux , tend );
    do_semi_step( state , state_tmp , state     , dt / 1 , DIR_Z , flux , tend );
    //x-direction second
    do_semi_step( state , state     , state_tmp , dt / 3 , DIR_X , flux , tend );
    do_semi_step( state , state_tmp , state_tmp , dt / 2 , DIR_X , flux , tend );
    do_semi_step( state , state_tmp , state     , dt / 1 , DIR_X , flux , tend );
  }
  if (direction_switch) { direction_switch = 0; } else { direction_switch = 1; }
}

//Perform a single semi-discretized step in time with the form:
//state_out = state_init + dt * rhs(state_forcing)
//Meaning the step starts from state_init, computes the rhs using state_forcing, and stores the result in state_out
void do_semi_step( double *state_init , double *state_forcing , double *state_out , double dt , int dir , double *flux , double *tend ) {
  if(dir == DIR_X) {
    //Set the halo values for this MPI task's fluid state in the x-direction
    exchange_border_x(state_forcing);
    //Compute the time tendencies for the fluid state in the x-direction
    do_dir_x(state_forcing,flux,tend);
  } else if (dir == DIR_Z) {
    //Set the halo values for this MPI task's fluid state in the z-direction
    exchange_border_z(state_forcing);
    //Compute the time tendencies for the fluid state in the z-direction
    do_dir_z(state_forcing,flux,tend);
  }
  int n = NUM_VARS * nnz * nnx;
  int gridSize = (n + blockSize - 1) / blockSize;
  do_semi_step_add<<<gridSize, blockSize>>>(state_out, state_init, tend, n, dt);
#ifdef DEBUG
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaDeviceSynchronize() );
#endif
}

//Compute the time tendencies of the fluid state using forcing in the x-direction
//Since the halos are set in a separate routine, this will not require MPI
//First, compute the flux vector at each cell interface in the x-direction (including viscosity)
//Then, compute the tendencies using those fluxes
void do_dir_x( double *state , double *flux , double *tend ) {
  //Compute the hyperviscosity coefficient
  const double v_coef = -hv * dx / (16*dt);
  int n = (nnz) * (nnx + 1);
  int gridSize = (n + blockSize - 1) / blockSize;
  do_dir_x_flux<<<gridSize, blockSize>>>(state, flux, tend, cfd_dens_cell_gpu, cfd_dens_theta_cell_gpu, n, v_coef);
#ifdef DEBUG
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaDeviceSynchronize() );
#endif
  n = NUM_VARS * nnz * nnx;
  gridSize = (n + blockSize - 1) / blockSize;
  do_dir_x_add<<<gridSize, blockSize>>>(tend, flux, n);
#ifdef DEBUG
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaDeviceSynchronize() );
#endif
}

//Compute the time tendencies of the fluid state using forcing in the z-direction
//Since the halos are set in a separate routine, this will not require MPI
//First, compute the flux vector at each cell interface in the z-direction (including viscosity)
//Then, compute the tendencies using those fluxes
void do_dir_z( double *state , double *flux , double *tend ) {
  //Compute the viscosity coefficient
  const double v_coef = -hv * dz / (16 * dt);
  int n = (nnz + 1) * nnx;
  int gridSize = (n + blockSize - 1) / blockSize;
  do_dir_z_flux<<<gridSize, blockSize>>>(state, flux, tend, cfd_dens_int_gpu, cfd_dens_theta_int_gpu, cfd_pressure_int_gpu, n, v_coef);
#ifdef DEBUG
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaDeviceSynchronize() );
#endif
  n = NUM_VARS * nnz * nnx;
  gridSize = (n + blockSize - 1) / blockSize;
  do_dir_z_add<<<gridSize, blockSize>>>(state, tend, flux, n);
#ifdef DEBUG
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaDeviceSynchronize() );
#endif
}

// CUDA kernel (appears unused: it is not launched anywhere in this file).
__global__ void copyStatesX(double *d, int n, int nnx_, int nnz_)
{
  // Get our global thread ID
  int id = blockIdx.x*blockDim.x+threadIdx.x;
  // Make sure we do not go out of bounds
  if (id < n){
    int ll = id / nnz_;
    int k  = id % nnz_;
    int pos = ll*(nnz_+4)*(nnx_+4) + (k+hs)*(nnx_+4);
    d[pos         ] = d[pos + nnx_  ];
    d[pos + 1     ] = d[pos + nnx_+1];
    d[pos + nnx_+2] = d[pos + 2     ];
    d[pos + nnx_+3] = d[pos + 3     ];
  }
}

//Set this MPI task's halo values in the x-direction.
This routine will require MPI void exchange_border_x( double *state ) { int n = NUM_VARS * nnz; int gridSize = (n + blockSize - 1)/blockSize; exchange_border_x_1<<<gridSize, blockSize>>>(state, n); #ifdef DEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif if (config_spec == CONFIG_IN_TEST6) { if (myrank == 0) { n = nnz * hs; gridSize = (n + blockSize - 1)/blockSize; exchange_border_x_2<<<gridSize, blockSize>>>(state, cfd_dens_cell_gpu, cfd_dens_theta_cell_gpu, n); #ifdef DEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif } } } //Set this MPI task's halo values in the z-direction. This does not require MPI because there is no MPI //decomposition in the vertical direction void exchange_border_z( double *state ) { const double mnt_width = xlen/8; int n = NUM_VARS * (nnx+2*hs); int gridSize = (n + blockSize - 1)/blockSize; exchange_border_z_1<<<gridSize, blockSize>>>(state, n, mnt_width); #ifdef DEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif } void initialize( int *argc , char ***argv ) { int i, k, ii, kk, ll, inds; double x, z, r, u, w, t, hr, ht; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // YOU DON'T NEED TO ALTER ANYTHING BELOW THIS POINT IN THE CODE //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //Allocate the model data state_cpu = (double *) malloc( (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); state_tmp_cpu = (double *) malloc( (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); flux_cpu = (double *) malloc( (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double) ); tend_cpu = (double *) malloc( nnx*nnz*NUM_VARS*sizeof(double) ); cfd_dens_cell_cpu = (double *) malloc( (nnz+2*hs)*sizeof(double) ); cfd_dens_theta_cell_cpu = (double *) malloc( (nnz+2*hs)*sizeof(double) ); cfd_dens_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); cfd_dens_theta_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); cfd_pressure_int_cpu = (double *) malloc( (nnz+1)*sizeof(double) ); //Allocate GPU memory cudaMalloc(&state_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); cudaMalloc(&state_tmp_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double) ); cudaMalloc(&flux_gpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double) ); cudaMalloc(&tend_gpu, nnx*nnz*NUM_VARS*sizeof(double) ); cudaMalloc(&cfd_dens_cell_gpu, (nnz+2*hs)*sizeof(double) ); cudaMalloc(&cfd_dens_theta_cell_gpu, (nnz+2*hs)*sizeof(double) ); cudaMalloc(&cfd_dens_int_gpu, (nnz+1)*sizeof(double) ); cudaMalloc(&cfd_dens_theta_int_gpu, (nnz+1)*sizeof(double) ); cudaMalloc(&cfd_pressure_int_gpu, (nnz+1)*sizeof(double) ); //Define the maximum stable time step based on an assumed maximum wind speed dt = dmin(dx,dz) / max_speed * cfl; //Set initial elapsed model time etime = 0.; //If I'm the master process in MPI, display some grid information if (masterproc) { printf( "nx_cfd, nz_cfd: %d %d\n", nx_cfd, nz_cfd); printf( "dx,dz: %lf %lf\n",dx,dz); printf( "dt: %lf\n",dt); } ////////////////////////////////////////////////////////////////////////// // Initialize the cell-averaged fluid state via Gauss-Legendre quadrature ////////////////////////////////////////////////////////////////////////// for (k=0; k<nnz+2*hs; k++) { for (i=0; i<nnx+2*hs; i++) { //Initialize the state to zero for (ll=0; ll<NUM_VARS; ll++) { inds = 
ll*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = 0.; } //Use Gauss-Legendre quadrature to initialize a balance + temperature perturbation for (kk=0; kk<nqpoints; kk++) { for (ii=0; ii<nqpoints; ii++) { //Compute the x,z location within the global domain based on cell and quadrature index x = (i_beg + i-hs+0.5)*dx + (qpoints[ii]-0.5)*dx; z = (k_beg + k-hs+0.5)*dz + (qpoints[kk]-0.5)*dz; //Set the fluid state based on the user's specification switch(config_spec){ case CONFIG_IN_TEST1: testcase1(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST2: testcase2(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST3: testcase3(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST4: testcase4(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST5: testcase5(x,z,r,u,w,t,hr,ht); break; case CONFIG_IN_TEST6: testcase6(x,z,r,u,w,t,hr,ht); break; } //Store into the fluid state array inds = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + r * qweights[ii]*qweights[kk]; inds = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + (r+hr)*u * qweights[ii]*qweights[kk]; inds = POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + (r+hr)*w * qweights[ii]*qweights[kk]; inds = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_cpu[inds] = state_cpu[inds] + ( (r+hr)*(t+ht) - hr*ht ) * qweights[ii]*qweights[kk]; } } for (ll=0; ll<NUM_VARS; ll++) { inds = ll*(nnz+2*hs)*(nnx+2*hs) + k*(nnx+2*hs) + i; state_tmp_cpu[inds] = state_cpu[inds]; } } } //Compute the background state over vertical cell averages for (k=0; k<nnz+2*hs; k++) { cfd_dens_cell_cpu [k] = 0.; cfd_dens_theta_cell_cpu[k] = 0.; for (kk=0; kk<nqpoints; kk++) { z = (k_beg + k-hs+0.5)*dz; //Set the fluid state based on the user's specification if (config_spec == CONFIG_IN_TEST1 ) { testcase1 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST2 ) { testcase2 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST3 ) { testcase3 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST4 ) { testcase4 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST5) { testcase5(0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST6 ) { testcase6 (0.,z,r,u,w,t,hr,ht); } cfd_dens_cell_cpu [k] = cfd_dens_cell_cpu [k] + hr * qweights[kk]; cfd_dens_theta_cell_cpu[k] = cfd_dens_theta_cell_cpu[k] + hr*ht * qweights[kk]; } } //Compute the background state at vertical cell interfaces for (k=0; k<nnz+1; k++) { z = (k_beg + k)*dz; if (config_spec == CONFIG_IN_TEST1 ) { testcase1 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST2 ) { testcase2 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST3 ) { testcase3 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST4 ) { testcase4 (0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST5) { testcase5(0.,z,r,u,w,t,hr,ht); } if (config_spec == CONFIG_IN_TEST6 ) { testcase6 (0.,z,r,u,w,t,hr,ht); } cfd_dens_int_cpu [k] = hr; cfd_dens_theta_int_cpu[k] = hr*ht; cfd_pressure_int_cpu [k] = C0*pow((hr*ht),gamm); } } //This test case is initially balanced but injects fast, cold air from the left boundary near the model top //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase6( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; } 
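// [Editor's note] Illustrative sketch, not part of the original source: the initialization
// loops above build cell averages with the 3-point Gauss-Legendre rule stored in qpoints /
// qweights (weights that sum to one up to rounding). A hypothetical host helper showing the
// same averaging for the background density of a single vertical cell k, sampling testcase1
// and reusing the cell-to-height mapping from the state initialization loop above:
double cell_avg_background_density_example( int k ) {
  double r, u, w, t, hr, ht;
  double avg = 0.;
  for (int kk=0; kk<nqpoints; kk++) {
    //Height of quadrature point kk inside cell k (cells are dz thick; hs offsets the halo)
    double z = (k_beg + k-hs+0.5)*dz + (qpoints[kk]-0.5)*dz;
    testcase1(0.,z,r,u,w,t,hr,ht);   //hr is the hydrostatic background density at height z
    avg = avg + hr * qweights[kk];   //weighted sum of samples approximates the cell average
  }
  return avg;
}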
//Initialize a density current (falling cold thermal that propagates along the model bottom) //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase5( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z,-20. ,xlen/2,5000.,4000.,2000.); } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase4( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; // call random_number(u); // call random_number(w); // u = (u-0.5)*20; // w = (w-0.5)*20; } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase3( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_bvfreq(z,0.02,hr,ht); r = 0.; t = 0.; u = 15.; w = 0.; } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase2( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z, 3. ,xlen/2,2000.,2000.,2000.); } //x and z are input coordinates at which to sample //r,u,w,t are output density, u-wind, w-wind, and potential temperature at that location //hr and ht are output background density and potential temperature at that location void testcase1( double x , double z , double &r , double &u , double &w , double &t , double &hr , double &ht ) { const_theta(z,hr,ht); r = 0.; t = 0.; u = 0.; w = 0.; t = t + sample_cosine(x,z, 20.,xlen/2,2000.,2000.,2000.); t = t + sample_cosine(x,z,-20.,xlen/2,8000.,2000.,2000.); } //Establish hydrstatic balance using constant potential temperature (thermally neutral atmosphere) //z is the input coordinate //r and t are the output background density and potential temperature void const_theta( double z , double &r , double &t ) { const double theta0 = 300.; //Background potential temperature const double exner0 = 1.; //Surface-level Exner pressure double p,exner,rt; //Establish balance first using Exner pressure t = theta0; //Potential Temperature at z exner = exner0 - grav * z / (cp * theta0); //Exner pressure at z p = p0 * pow(exner,(cp/rd)); //Pressure at z rt = pow((p / C0),(1. 
/ gamm)); //rho*theta at z r = rt / t; //Density at z } //Establish hydrstatic balance using constant Brunt-Vaisala frequency //z is the input coordinate //bv_freq0 is the constant Brunt-Vaisala frequency //r and t are the output background density and potential temperature void const_bvfreq( double z , double bv_freq0 , double &r , double &t ) { const double theta0 = 300.; //Background potential temperature const double exner0 = 1.; //Surface-level Exner pressure double p, exner, rt; t = theta0 * exp( bv_freq0*bv_freq0 / grav * z ); //Pot temp at z exner = exner0 - grav*grav / (cp * bv_freq0*bv_freq0) * (t - theta0) / (t * theta0); //Exner pressure at z p = p0 * pow(exner,(cp/rd)); //Pressure at z rt = pow((p / C0),(1. / gamm)); //rho*theta at z r = rt / t; //Density at z } //Sample from an ellipse of a specified center, radius, and amplitude at a specified location //x and z are input coordinates //amp,x0,z0,xrad,zrad are input amplitude, center, and radius of the ellipse double sample_cosine( double x , double z , double amp , double x0 , double z0 , double xrad , double zrad ) { double dist; //Compute distance from bubble center dist = sqrt( ((x-x0)/xrad)*((x-x0)/xrad) + ((z-z0)/zrad)*((z-z0)/zrad) ) * pi / 2.; //If the distance from bubble center is less than the radius, create a cos**2 profile if (dist <= pi / 2.) { return amp * pow(cos(dist),2.); } else { return 0.; } } void finalize() { free( state_cpu ); free( state_tmp_cpu ); free( flux_cpu ); free( tend_cpu ); free( cfd_dens_cell_cpu ); free( cfd_dens_theta_cell_cpu ); free( cfd_dens_int_cpu ); free( cfd_dens_theta_int_cpu ); free( cfd_pressure_int_cpu ); cudaFree( state_gpu ); cudaFree( state_tmp_gpu ); cudaFree( flux_gpu ); cudaFree( tend_gpu ); cudaFree( cfd_dens_cell_gpu ); cudaFree( cfd_dens_theta_cell_gpu ); cudaFree( cfd_dens_int_gpu ); cudaFree( cfd_dens_theta_int_gpu ); cudaFree( cfd_pressure_int_gpu ); } //Compute reduced quantities for error checking without resorting void do_results( double &mass , double &te ) { mass = 0; te = 0; for (int k=0; k<nnz; k++) { for (int i=0; i<nnx; i++) { int ind_r = POS_DENS*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_u = POS_UMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_w = POS_WMOM*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; int ind_t = POS_RHOT*(nnz+2*hs)*(nnx+2*hs) + (k+hs)*(nnx+2*hs) + i+hs; double r = state_cpu[ind_r] + cfd_dens_cell_cpu[hs+k]; // Density double u = state_cpu[ind_u] / r; // U-wind double w = state_cpu[ind_w] / r; // W-wind double th = ( state_cpu[ind_t] + cfd_dens_theta_cell_cpu[hs+k] ) / r; // Potential Temperature (theta) double p = C0*pow(r*th,gamm); // Pressure double t = th / pow(p0/p,rd/cp); // Temperature double ke = r*(u*u+w*w); // Kinetic Energy double ie = r*cv*t; // Internal Energy mass += r *dx*dz; // Accumulate domain mass te += (ke + ie)*dx*dz; // Accumulate domain total energy } } } void copy_to_gpu(){ cudaMemcpy(state_gpu, state_cpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(state_tmp_gpu, state_tmp_cpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(flux_gpu, flux_cpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(tend_gpu, tend_cpu, nnx*nnz*NUM_VARS*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cfd_dens_cell_gpu, cfd_dens_cell_cpu, (nnz+2*hs)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cfd_dens_theta_cell_gpu, cfd_dens_theta_cell_cpu, (nnz+2*hs)*sizeof(double), cudaMemcpyHostToDevice); 
cudaMemcpy(cfd_dens_int_gpu, cfd_dens_int_cpu, (nnz+1)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cfd_dens_theta_int_gpu, cfd_dens_theta_int_cpu, (nnz+1)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cfd_pressure_int_gpu, cfd_pressure_int_cpu, (nnz+1)*sizeof(double), cudaMemcpyHostToDevice); } void copy_to_cpu(){ cudaMemcpy(state_cpu, state_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(state_tmp_cpu, state_tmp_gpu, (nnx+2*hs)*(nnz+2*hs)*NUM_VARS*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(flux_cpu, flux_gpu, (nnx+1)*(nnz+1)*NUM_VARS*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(tend_cpu, tend_gpu, nnx*nnz*NUM_VARS*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(cfd_dens_cell_cpu, cfd_dens_cell_gpu, (nnz+2*hs)*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(cfd_dens_theta_cell_cpu, cfd_dens_theta_cell_gpu, (nnz+2*hs)*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(cfd_dens_int_cpu, cfd_dens_int_gpu, (nnz+1)*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(cfd_dens_theta_int_cpu, cfd_dens_theta_int_gpu, (nnz+1)*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(cfd_pressure_int_cpu, cfd_pressure_int_gpu, (nnz+1)*sizeof(double), cudaMemcpyDeviceToHost); } void print(double *v, int n){ for(int i = 0; i < n; i++) if(v[i] != 0.0) printf("%d: %lf\n", i, v[i]); printf("\n"); } /////////////////////////////////////////////////////////////////////////////////////// // THE MAIN PROGRAM STARTS HERE /////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { initialize( &argc , &argv ); //cudaDeviceSynchronize(); //Initial reductions for mass, kinetic energy, and total energy do_results(mass0,te0); //Copying data to GPU copy_to_gpu(); #ifdef DEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif //////////////////////////////////////////////////// // MAIN TIME STEP LOOP //////////////////////////////////////////////////// auto c_start = std::clock(); while (etime < sim_time) { //If the time step leads to exceeding the simulation time, shorten it for the last step if (etime + dt > sim_time) { dt = sim_time - etime; } //Perform a single time step do_timestep(state_gpu,state_tmp_gpu,flux_gpu,tend_gpu,dt); //cudaDeviceSynchronize(); //Update the elapsed time and output counter etime = etime + dt; output_counter = output_counter + dt; //If it's time for output, reset the counter, and do output if (output_counter >= output_freq) { output_counter = output_counter - output_freq; //Inform the user if (masterproc) { printf( "Elapsed Time: %lf / %lf\n", etime , sim_time ); } } } auto c_end = std::clock(); if (masterproc) { std::cout << "CPU Time: " << ( (double) (c_end-c_start) ) / CLOCKS_PER_SEC << " sec\n"; } copy_to_cpu(); #ifdef DEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif //Final reductions for mass, kinetic energy, and total energy do_results(mass,te); if (masterproc) { printf( "d_mass: %le\n" , (mass - mass0)/mass0); printf( "d_te: %le\n" , (te - te0 )/te0 ); } finalize(); }
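// [Editor's note] The grid size, simulation length, output frequency, and test case are
// injected at compile time through the _NX, _NZ, _SIM_TIME, _OUT_FREQ, and _IN_CONFIG macros
// used above. A hypothetical build line (the file name and numeric values are only examples,
// not taken from the original source) might look like:
//
//   nvcc -O3 -o miniCFD miniCFD.cu \
//        -D_NX=400 -D_NZ=200 -D_SIM_TIME=1000 -D_OUT_FREQ=10 -D_IN_CONFIG=CONFIG_IN_TEST2
//
// _IN_CONFIG resolves against the test_cases enum defined near the top of this file, so any
// of CONFIG_IN_TEST1 through CONFIG_IN_TEST6 can be substituted.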
0a1cd96cf655cc2d4bd348cd34542112f0cb3127.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halvi(hipComplex z) { hipComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); 
hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __device__ hipComplex mervana(hipComplex z, hipComplex ao, hipComplex uo) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tle(1.0,0.0); hipComplex rhuva(3.0,0); hipComplex netha(1.0,0.0); hipComplex arvir(0.96592582628906831,0.25881904510252074 ); for(v=0;v<5;v++) { out= out * expc(ao/(ao-uo*powc(z,powc(rhuva,tle)))-ao); tle=tle+unity; } return out; } __device__ hipComplex marvana(hipComplex z, hipComplex ao, hipComplex uo) { hipComplex out(0.0,0.0); hipComplex tiny(0.001,0.001); out=(mervana(z+tiny,ao,uo)-mervana(z,ao,uo))/tiny; return out; } __device__ hipComplex arvana(hipComplex z, hipComplex ao, hipComplex uo) { hipComplex out(0.0,0.0); hipComplex tiny(0.001,0.001); out=(marvana(z+tiny,ao,uo)-marvana(z,ao,uo))/tiny; return out; } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =2.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); //hipComplex tin(1/4096.0,0.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(1.0,0.0); hipComplex 
cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/3.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = unity; hipComplex vue = cue*ai; hipComplex lue = unity; hipComplex rhuva(3.0,0.0); hipComplex rarva(2.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); hipComplex arvir(0.96592582628906831,0.25881904510252074 ); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); accume = unity; sle = unity; for(v=0;v<20;v++) { accume= accume * expc(aon/(uon+powc(q,powc(rhuva,tle)))); tle=tle+unity; } cue = accume; // for the lines of the hyperbolic tiling, we want the second derivative of mervana, which I'm going to define above. /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; if((cue.i>0) ) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } else { if((cue.r>0) && (cue.i<0) ) { d_out[i].x = 128; d_out[i].y = 128; d_out[i].z = 128; d_out[i].w = 255; } else { d_out[i].x = 255; d_out[i].y = 255; d_out[i].z = 255; d_out[i].w = 255; } } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
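// [Editor's note] Minimal host-side usage sketch, not part of the original source. It assumes
// kernel.h declares kernelLauncher as defined above, and that it is built as a separate
// translation unit (hence it is left commented out here). It renders one frame into a device
// framebuffer with the HIP runtime API and copies it back to the host; the DIM-sized buffer
// and the int2 "mouse" position mirror the parameters distanceKernel expects.
//
//   #include <hip/hip_runtime.h>
//   #include <vector>
//   #include "kernel.h"
//
//   int main() {
//     const int w = 2100, h = 2100;                       // matches DIM above
//     uchar4 *d_out = nullptr;
//     hipMalloc(&d_out, w * h * sizeof(uchar4));          // device framebuffer
//     int2 pos = make_int2(w / 2, h / 2);                 // "mouse" position fed to the kernel
//     kernelLauncher(d_out, w, h, pos);                   // launches distanceKernel over the grid
//     hipDeviceSynchronize();
//     std::vector<uchar4> pixels(w * h);
//     hipMemcpy(pixels.data(), d_out, w * h * sizeof(uchar4), hipMemcpyDeviceToHost);
//     hipFree(d_out);
//     return 0;
//   }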
0a1cd96cf655cc2d4bd348cd34542112f0cb3127.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halvi(cuComplex z) { cuComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * 
conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __device__ cuComplex mervana(cuComplex z, cuComplex ao, cuComplex uo) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tle(1.0,0.0); cuComplex rhuva(3.0,0); cuComplex netha(1.0,0.0); cuComplex arvir(0.96592582628906831,0.25881904510252074 ); for(v=0;v<5;v++) { out= out * expc(ao/(ao-uo*powc(z,powc(rhuva,tle)))-ao); tle=tle+unity; } return out; } __device__ cuComplex marvana(cuComplex z, cuComplex ao, cuComplex uo) { cuComplex out(0.0,0.0); cuComplex tiny(0.001,0.001); out=(mervana(z+tiny,ao,uo)-mervana(z,ao,uo))/tiny; return out; } __device__ cuComplex arvana(cuComplex z, cuComplex ao, cuComplex uo) { cuComplex out(0.0,0.0); cuComplex tiny(0.001,0.001); out=(marvana(z+tiny,ao,uo)-marvana(z,ao,uo))/tiny; return out; } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =2.1; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); //cuComplex tin(1/4096.0,0.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(1.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex 
mrun(1/3.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = unity; cuComplex vue = cue*ai; cuComplex lue = unity; cuComplex rhuva(3.0,0.0); cuComplex rarva(2.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); cuComplex arvir(0.96592582628906831,0.25881904510252074 ); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); accume = unity; sle = unity; for(v=0;v<20;v++) { accume= accume * expc(aon/(uon+powc(q,powc(rhuva,tle)))); tle=tle+unity; } cue = accume; // for the lines of the hyperbolic tiling, we want the second derivative of mervana, which I'm going to define above. /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; if((cue.i>0) ) { d_out[i].x = 0; d_out[i].y = 0; d_out[i].z = 0; d_out[i].w = 255; } else { if((cue.r>0) && (cue.i<0) ) { d_out[i].x = 128; d_out[i].y = 128; d_out[i].z = 128; d_out[i].w = 255; } else { d_out[i].x = 255; d_out[i].y = 255; d_out[i].z = 255; d_out[i].w = 255; } } } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
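/* A small host-side reference for two pieces of the file above, with names (qpoch_ref, arg_to_rgb)
   chosen here rather than taken from it. qpoch() accumulates a truncated q-Pochhammer product
   starting from Q = q, i.e. roughly prod_{i=1..79} (1 - a*q^i), so the leading (1 - a) factor of the
   textbook (a;q)_infinity product is not included; and the tail of distanceKernel() turns the
   argument of the accumulated value into three phase-shifted sin^2 colour ramps (which the
   quadrant test afterwards overrides). */
#include <cmath>
#include <complex>
#include <cstdio>

using cpx = std::complex<double>;

// Truncated q-Pochhammer product mirroring qpoch(): 79 factors, zero whenever |q|^2 > 1.
static cpx qpoch_ref(cpx a, cpx q, int nterms = 80)
{
    if (std::norm(q) > 1.0) return cpx(0.0, 0.0);        // same guard as the device routine
    cpx out(1.0, 0.0), Q = q;
    for (int i = 1; i < nterms; ++i) { out *= cpx(1.0, 0.0) - a * Q; Q *= q; }
    return out;
}

// Phase-to-colour ramp from the end of distanceKernel(): tha from the argument of cue,
// then 255 * sin^2(pi*tha + k*pi/3) for the three channels.
static void arg_to_rgb(cpx cue, unsigned char rgb[3])
{
    const double pi = 3.14159265358979;
    double tha = (std::atan2(cue.imag(), cue.real()) - pi) / (2.0 * pi);
    for (int k = 0; k < 3; ++k)
        rgb[k] = (unsigned char)(255.0 * std::pow(std::sin(pi * tha + k * pi / 3.0), 2.0));
}

int main()
{
    unsigned char rgb[3];
    arg_to_rgb(qpoch_ref(cpx(0.3, 0.1), cpx(0.5, 0.2)), rgb);
    std::printf("%u %u %u\n", (unsigned)rgb[0], (unsigned)rgb[1], (unsigned)rgb[2]);
    return 0;
}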
e5bc1727172ff3530aa0e2cc4c53c03b615b8555.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/explainer/kernel_shap.hpp> #include <test_utils.h> #include <raft/cuda_utils.cuh> #include <raft/cudart_utils.h> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <test_utils.h> namespace MLCommon { } #include <gtest/gtest.h> namespace ML { namespace Explainer { struct MakeKSHAPDatasetInputs { int nrows_exact; int nrows_sampled; int ncols; int nrows_background; int max_samples; uint64_t seed; }; template <typename T> class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> { protected: void SetUp() override { params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam(); stream = handle.get_stream(); int i, j; nrows_X = params.nrows_exact + params.nrows_sampled; rmm::device_uvector<T> background(params.nrows_background * params.ncols, stream); rmm::device_uvector<T> observation(params.ncols, stream); rmm::device_uvector<int> nsamples(params.nrows_sampled / 2, stream); rmm::device_uvector<float> X(nrows_X * params.ncols, stream); rmm::device_uvector<T> dataset(nrows_X * params.nrows_background * params.ncols, stream); thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background.data()); thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation.data()); thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples.data()); thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X.data()); thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset.data()); // Initialize arrays: // Aassign a sentinel value to the observation to check easily later T sent_value = nrows_X * params.nrows_background * params.ncols * 100; for (i = 0; i < params.ncols; i++) { o_ptr[i] = sent_value; } // Initialize background array with different odd value per row, makes // it easier to debug if something goes wrong. for (i = 0; i < params.nrows_background; i++) { for (j = 0; j < params.ncols; j++) { b_ptr[i * params.ncols + j] = (i * 2) + 1; } } // Initialize the exact part of X. We create 2 `1` values per row for the test thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0); for (i = 0; i < params.nrows_exact; i++) { for (j = i; j < i + 2; j++) { X_ptr[i * params.ncols + j] = (float)1.0; } } // Initialize the number of samples per row, we initialize each even row to // max samples and each odd row to max_samples - 1 for (i = 0; i < params.nrows_sampled / 2; i++) { n_ptr[i] = params.max_samples - i % 2; } kernel_dataset(handle, X.data(), nrows_X, params.ncols, background.data(), params.nrows_background, dataset.data(), observation.data(), nsamples.data(), params.nrows_sampled, params.max_samples, params.seed); handle.sync_stream(stream); int counter; // Check the generated part of X by sampling. 
The first nrows_exact // correspond to the exact part generated before, so we just test after that. test_sampled_X = true; j = 0; for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2; i += 2 * params.ncols) { // check that number of samples is the number indicated by nsamples. counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == n_ptr[j])); // check that number of samples of the next line is the compliment, // i.e. ncols - nsamples[j] counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j]))); j++; } // Check for the exact part of the generated dataset. test_scatter_exact = true; for (i = 0; i < params.nrows_exact; i++) { for (j = i * params.nrows_background * params.ncols; j < (i + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); // Check that indeed we have two observation entries ber row test_scatter_exact = test_scatter_exact && (counter == 2); if (not test_scatter_exact) { std::cout << "test_scatter_exact counter failed with: " << counter << ", expected value was 2." << std::endl; break; } } if (not test_scatter_exact) { break; } } // Check for the sampled part of the generated dataset test_scatter_sampled = true; // compliment_ctr is a helper counter to help check nrows_dataset per entry in // nsamples without complicating indexing since sampled part starts at nrows_sampled int compliment_ctr = 0; for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) { // First set of dataset observations must correspond to nsamples[i] for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]); } // The next set of samples must correspond to the compliment: ncols - nsamples[i] compliment_ctr++; for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { // Check that number of observation entries corresponds to nsamples. counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]); } } } protected: MakeKSHAPDatasetInputs params; int nrows_X; bool test_sampled_X; bool test_scatter_exact; bool test_scatter_sampled; raft::handle_t handle; hipStream_t stream = 0; }; const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL} }; typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF; TEST_P(MakeKSHAPDatasetTestF, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) 
// will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf)); const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL}}; typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD; TEST_P(MakeKSHAPDatasetTestD, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) // will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd)); } // end namespace Explainer } // end namespace ML
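/* A host-only illustration of the pair-wise check that the test performs on the sampled part of X
   with thrust::count: the first row of each pair must contain nsamples[j] ones and the row right
   after it the complement, ncols - nsamples[j]. The row contents and sizes below are toy values
   made up for the illustration. */
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    const int ncols    = 4;
    const int nsamples = 3;                        // entries drawn for this row pair
    std::vector<float> row      = {1, 1, 1, 0};    // first row of the pair: nsamples ones
    std::vector<float> row_comp = {0, 0, 0, 1};    // next row: ncols - nsamples ones
    auto ones  = std::count(row.begin(), row.end(), 1.0f);
    auto cones = std::count(row_comp.begin(), row_comp.end(), 1.0f);
    assert(ones == nsamples && cones == ncols - nsamples);
    return 0;
}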
e5bc1727172ff3530aa0e2cc4c53c03b615b8555.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/explainer/kernel_shap.hpp> #include <test_utils.h> #include <raft/cuda_utils.cuh> #include <raft/cudart_utils.h> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <test_utils.h> namespace MLCommon { } #include <gtest/gtest.h> namespace ML { namespace Explainer { struct MakeKSHAPDatasetInputs { int nrows_exact; int nrows_sampled; int ncols; int nrows_background; int max_samples; uint64_t seed; }; template <typename T> class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> { protected: void SetUp() override { params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam(); stream = handle.get_stream(); int i, j; nrows_X = params.nrows_exact + params.nrows_sampled; rmm::device_uvector<T> background(params.nrows_background * params.ncols, stream); rmm::device_uvector<T> observation(params.ncols, stream); rmm::device_uvector<int> nsamples(params.nrows_sampled / 2, stream); rmm::device_uvector<float> X(nrows_X * params.ncols, stream); rmm::device_uvector<T> dataset(nrows_X * params.nrows_background * params.ncols, stream); thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background.data()); thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation.data()); thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples.data()); thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X.data()); thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset.data()); // Initialize arrays: // Aassign a sentinel value to the observation to check easily later T sent_value = nrows_X * params.nrows_background * params.ncols * 100; for (i = 0; i < params.ncols; i++) { o_ptr[i] = sent_value; } // Initialize background array with different odd value per row, makes // it easier to debug if something goes wrong. for (i = 0; i < params.nrows_background; i++) { for (j = 0; j < params.ncols; j++) { b_ptr[i * params.ncols + j] = (i * 2) + 1; } } // Initialize the exact part of X. We create 2 `1` values per row for the test thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0); for (i = 0; i < params.nrows_exact; i++) { for (j = i; j < i + 2; j++) { X_ptr[i * params.ncols + j] = (float)1.0; } } // Initialize the number of samples per row, we initialize each even row to // max samples and each odd row to max_samples - 1 for (i = 0; i < params.nrows_sampled / 2; i++) { n_ptr[i] = params.max_samples - i % 2; } kernel_dataset(handle, X.data(), nrows_X, params.ncols, background.data(), params.nrows_background, dataset.data(), observation.data(), nsamples.data(), params.nrows_sampled, params.max_samples, params.seed); handle.sync_stream(stream); int counter; // Check the generated part of X by sampling. The first nrows_exact // correspond to the exact part generated before, so we just test after that. 
test_sampled_X = true; j = 0; for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2; i += 2 * params.ncols) { // check that number of samples is the number indicated by nsamples. counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == n_ptr[j])); // check that number of samples of the next line is the compliment, // i.e. ncols - nsamples[j] counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j]))); j++; } // Check for the exact part of the generated dataset. test_scatter_exact = true; for (i = 0; i < params.nrows_exact; i++) { for (j = i * params.nrows_background * params.ncols; j < (i + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); // Check that indeed we have two observation entries ber row test_scatter_exact = test_scatter_exact && (counter == 2); if (not test_scatter_exact) { std::cout << "test_scatter_exact counter failed with: " << counter << ", expected value was 2." << std::endl; break; } } if (not test_scatter_exact) { break; } } // Check for the sampled part of the generated dataset test_scatter_sampled = true; // compliment_ctr is a helper counter to help check nrows_dataset per entry in // nsamples without complicating indexing since sampled part starts at nrows_sampled int compliment_ctr = 0; for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) { // First set of dataset observations must correspond to nsamples[i] for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]); } // The next set of samples must correspond to the compliment: ncols - nsamples[i] compliment_ctr++; for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { // Check that number of observation entries corresponds to nsamples. counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]); } } } protected: MakeKSHAPDatasetInputs params; int nrows_X; bool test_sampled_X; bool test_scatter_exact; bool test_scatter_sampled; raft::handle_t handle; cudaStream_t stream = 0; }; const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL} }; typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF; TEST_P(MakeKSHAPDatasetTestF, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) 
// will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf)); const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL}}; typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD; TEST_P(MakeKSHAPDatasetTestD, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) // will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd)); } // end namespace Explainer } // end namespace ML
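/* A pocket-sized version of the index walk used in the exact-part check of the test above: for
   exact row i of X, its scattered block occupies dataset rows [i*nrows_background,
   (i+1)*nrows_background), and the inner loop advances one dataset row (ncols entries) per step.
   Sizes are toy values chosen for the illustration. */
#include <cassert>

int main()
{
    const int nrows_background = 2, ncols = 3;
    const int i = 1;                                  // second exact row of X
    int rows_visited = 0;
    for (int j = i * nrows_background * ncols;
         j < (i + 1) * nrows_background * ncols;
         j += ncols)                                  // one background-expanded row per step
        ++rows_visited;
    assert(rows_visited == nrows_background);         // the block spans exactly nrows_background rows
    return 0;
}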
b3802388f5fbd0d5fa85586fac4b2a89ef3c2976.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 1024; int dimensigrid = 8; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); hipError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); hipError_t CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; uint* minbuff2 = (uint*) malloc(sizeof(uint) * c->size); short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff2); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff2); } } free(minbuff2); // printf("res adlaah %u\n", res->value[0]); } __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // printf("res adalah %u\n", res->value[0]); // // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); // printf("res val 0 adalah %p\n", &(res->value[0])); // printf("res val 1 adalah %p\n", &(res->value[1])); // printf("res val 2 adalah %p\n", &(res->value[2])); // printf("res val 3 adalah %p\n", &(res->value[3])); // printf("res 1 val 0 adalah %p\n", &((res+1)->value[0])); // printf("res 1 val 1 adalah %p\n", &((res+1)->value[1])); // printf("res 1 val 2 adalah %p\n", &((res+1)->value[2])); // printf("res 1 val 3 adalah %p\n", &((res+1)->value[3])); // printf("res val 0 adalah %u\n", res->value[0]); // printf("res 1 val 0 adalah %u\n", (res+1)->value[0]); } __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, 
c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); // printf("c1 adlaah %u\n", c1->value[0]); // printf("c2 adlaah %u\n", c2->value[0]); } __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int jdx = threadIdx.x; __shared__ big sm[128]; __shared__ big sk[128]; __shared__ big sres[256]; __shared__ big sp; __shared__ big sg; __shared__ big sy; __shared__ uint s[3200]; uint *sresval = s; uint *spval = (uint*)&sresval[8*128*2]; uint *sgval = (uint*)&spval[4]; uint *syval = (uint*)&sgval[4]; uint *smval = (uint*)&syval[4]; uint *skval = (uint*)&smval[4*128]; sm[jdx].size = m[idx].size; sk[jdx].size = k[idx].size; sp.size = p[0].size; sg.size = g[0].size; sy.size = y[0].size; for (int i = 0; i < 4; i++) { smval[jdx*4+i] = m[idx].value[i]; skval[jdx*4+i] = k[idx].value[i]; spval[i] = p[0].value[i]; sgval[i] = g[0].value[i]; syval[i] = y[0].value[i]; } sm[jdx].value = (uint*)&smval[jdx*4]; sk[jdx].value = (uint*)&skval[jdx*4]; sres[2*jdx].value = (uint*)&sresval[jdx*8*2]; sres[2*jdx+1].value = (uint*)&sresval[jdx*8*2+8]; sp.value = spval; sg.value = sgval; sy.value = syval; __syncthreads(); // if(idx < 10){ // // printf("pointer2 di %d = %p \n", 2*jdx,sres[2*jdx].value); // // printf("pointer2 di %d = %p \n", 2*jdx+1,sres[2*jdx+1].value); // // printf("sresval pointer di %d = %p \n", jdx,sresval + jdx); // // printf("pointer big di %d = %p \n", 2*jdx,sres+2*jdx); // // printf("pointer big2 di %d = %p \n", 2*jdx+1,sres+2*jdx+1); big* minbuff = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); minbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); mulbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, minbuff, mulbuff); // } if(idx == 0){ printf("Semangkas\n"); printf("xx[0].size = %u\n", sgval[0]); printf("xx[0].size = %u\n", g[0].value[0]); printf("size = %u\n", m[0].size); printf("sm[0].val 0 = %u\n", y[0].value[1]); printf("Pointer 1 = %p\n", &(sm[0].value[0])); printf("Pointer 2 = %p\n", &(sm+0)->value[0]); big* poinm = sm; printf("Pointer 3 = %p\n", poinm); printf("Pointer 4 = %p\n", &poinm->value[0]); printf("sres 0 adalah %d\n", sres[2].size); printf("sres 0 adalah %d\n", sres[2].value[0]); } // printf("sres %d adalah %d\n", 2*idx, sres[2*jdx].size); // printf("sres %d adalah %d\n", 2*idx+1, sres[2*jdx+1].size); res[2*idx].size = sres[2*jdx].size; res[2*idx+1].size = sres[2*jdx+1].size; for (int i = 0; i < sres[2*jdx].size; i++) { res[2*idx].value[i] = sres[2*jdx].value[i]; } for (int i = 0; i < sres[2*jdx+1].size; i++) { res[2*idx+1].value[i] = sres[2*jdx+1].value[i]; } } __global__ void kerneldek(big *c, big *e, big *p, big *res){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int jdx = threadIdx.x; __shared__ big sa[128]; __shared__ big sb[128]; __shared__ big sres[128]; __shared__ big sp; __shared__ big se; __shared__ uint s[2100]; uint *sresval = s; uint *spval = (uint*)&sresval[8*128]; uint *saval = (uint*)&spval[4]; uint *sbval = (uint*)&saval[4*128]; uint *seval = (uint*)&sbval[4*128]; sa[jdx].size = c[2*idx].size; sb[jdx].size = c[2*idx+1].size; sp.size = p[0].size; se.size = e[0].size; for (int i = 0; i < 4; i++) { saval[jdx*4+i] = c[2*idx].value[i]; sbval[jdx*4+i] = c[2*idx+1].value[i]; spval[i] = p[0].value[i]; seval[i] = e[0].value[i]; } sa[jdx].value = (uint*)&saval[jdx*4]; sb[jdx].value = (uint*)&sbval[jdx*4]; sres[jdx].value = (uint*)&sresval[jdx*8]; sp.value = spval; se.value = seval; __syncthreads(); big* 
minbuff = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); minbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); mulbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); dekripsi(sa+jdx, sb+jdx, &se, &sp, sres+jdx, minbuff, mulbuff); res[idx].size = sres[jdx].size; for (int i = 0; i < sres[jdx].size; i++) { res[idx].value[i] = sres[jdx].value[i]; } } hipError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { hipError_t cudaStatus; hipSetDevice(0); //=====================BAGIAN G, P, DAN Y ====================================// big *devg, *devp, *devy; hipMalloc((void**)&devg, sizeof(big)); hipMalloc((void**)&devp, sizeof(big)); hipMalloc((void**)&devy, sizeof(big)); uint *darrg, *darrp, *darry; hipMalloc((void**)&darrg, g->size * sizeof(uint)); hipMalloc((void**)&darrp, p->size * sizeof(uint)); hipMalloc((void**)&darry, y->size * sizeof(uint)); big tempg; hipMemcpy(darrg, g->value, (sizeof(uint) * g->size), hipMemcpyHostToDevice); tempg.size = g->size; tempg.value = darrg; hipMemcpy((devg), &tempg, (sizeof(big)), hipMemcpyHostToDevice); big tempp; hipMemcpy(darrp, p->value, (sizeof(uint) * p->size), hipMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; hipMemcpy((devp), &tempp, (sizeof(big)), hipMemcpyHostToDevice); big tempy; hipMemcpy(darry, y->value, (sizeof(uint) * y->size), hipMemcpyHostToDevice); tempy.size = y->size; tempy.value = darry; hipMemcpy((devy), &tempy, (sizeof(big)), hipMemcpyHostToDevice); //=====================BAGIAN M[] DAN K[] ====================================// big *devm, *devk, *devres, *minbuff, *mulbuff; hipMalloc((void**)&devm, banyakdata * sizeof(big)); hipMalloc((void**)&devk, banyakdata * sizeof(big)); hipMalloc((void**)&devres, banyakdata * 2 *sizeof(big)); hipMalloc((void**)&minbuff, banyakdata * sizeof(big)); hipMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3a = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3b = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue5 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp; hipMalloc((void**)&tempvalue[i], (sizeof(uint) * m[0].size)); hipMemcpy(tempvalue[i], m[0].value, (sizeof(uint) * m[0].size), hipMemcpyHostToDevice); temp.size = m[0].size; temp.value = tempvalue[i]; hipMemcpy((devm + i), &temp, (sizeof(big)), hipMemcpyHostToDevice); big temp2; hipMalloc((void**)&tempvalue2[i], (sizeof(uint) * k[0].size)); hipMemcpy(tempvalue2[i], k[0].value, (sizeof(uint) * k[0].size), hipMemcpyHostToDevice); temp2.size = k[0].size; temp2.value = tempvalue2[i]; hipMemcpy((devk + i), &temp2, (sizeof(big)), hipMemcpyHostToDevice); big temp3a; hipMalloc((void**)&tempvalue3a[i], (sizeof(uint) * p->size * 2)); temp3a.value = tempvalue3a[i]; hipMemcpy((devres + 2 * i), &temp3a, (sizeof(big)), hipMemcpyHostToDevice); big temp3b; hipMalloc((void**)&tempvalue3b[i], (sizeof(uint) * p->size * 2)); temp3b.value = tempvalue3b[i]; hipMemcpy((devres + 2 * i + 1), &temp3b, (sizeof(big)), hipMemcpyHostToDevice); big temp4; hipMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; hipMemcpy((minbuff + i), &temp4, (sizeof(big)), hipMemcpyHostToDevice); big temp5; hipMalloc((void**)&tempvalue5[i], (sizeof(uint) * p->size * 2)); temp5.value = 
tempvalue5[i]; hipMemcpy((mulbuff + i), &temp5, (sizeof(big)), hipMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // hipMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); kernelenk << <dimensigrid, dimensiblok >> >(devm, devk, devg, devp, devy, devres); cudaStatus = hipGetLastError(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != hipSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } else { // printf("Success\n"); } hipDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * 2 * sizeof(big)); for (int i = 0; i < banyakdata*2; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } hipMemcpy(tempres, devres, (sizeof(big) * 2 * banyakdata), hipMemcpyDeviceToHost); for (int i = 0; i < banyakdata*2; i++){ res[i].size = tempres[i].size; hipMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, hipMemcpyDeviceToHost); } hipFree(darrg); hipFree(darrp); hipFree(darry); hipFree(devg); hipFree(devp); hipFree(devy); for (int i = 0; i < banyakdata; i++) { hipFree(tempvalue[i]); hipFree(tempvalue2[i]); hipFree(tempvalue3a[i]); hipFree(tempvalue3b[i]); hipFree(tempvalue4[i]); hipFree(tempvalue5[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3a); free(tempvalue3b); free(tempvalue4); free(tempvalue5); hipFree(devm); hipFree(devk); hipFree(devres); hipFree(minbuff); hipFree(mulbuff); free(tempres); //hipProfilerStop(); //free(med); return cudaStatus; } hipError_t CUDAdek(big *c, big *e, big* p, big *res) { hipError_t cudaStatus; hipSetDevice(0); //=====================BAGIAN p dan e ( eksponen) ====================================// big *devp, *deve; hipMalloc((void**)&devp, sizeof(big)); hipMalloc((void**)&deve, sizeof(big)); uint *darrp, *darre; hipMalloc((void**)&darrp, p->size * sizeof(uint)); hipMalloc((void**)&darre, e->size * sizeof(uint)); big tempp; hipMemcpy(darrp, p->value, (sizeof(uint) * p->size), hipMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; hipMemcpy((devp), &tempp, (sizeof(big)), hipMemcpyHostToDevice); big tempe; hipMemcpy(darre, e->value, (sizeof(uint) * e->size), hipMemcpyHostToDevice); tempe.size = e->size; tempe.value = darre; hipMemcpy((deve), &tempe, (sizeof(big)), hipMemcpyHostToDevice); //======================================BAGIAN C[] ====================================// big *devc, *devres, *minbuff, *mulbuff; hipMalloc((void**)&devc, banyakdata * 2 * sizeof(big)); hipMalloc((void**)&devres, banyakdata * sizeof(big)); hipMalloc((void**)&minbuff, banyakdata * sizeof(big)); hipMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata*2); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp11; hipMalloc((void**)&tempvalue[2*i], (sizeof(uint) * c[0].size)); hipMemcpy(tempvalue[2*i], c[0].value, 
(sizeof(uint) * c[0].size), hipMemcpyHostToDevice); temp11.size = c[0].size; temp11.value = tempvalue[2*i]; hipMemcpy((devc + 2*i), &temp11, (sizeof(big)), hipMemcpyHostToDevice); big temp12; hipMalloc((void**)&tempvalue[2*i+1], (sizeof(uint) * c[1].size)); hipMemcpy(tempvalue[2*i+1], c[1].value, (sizeof(uint) * c[1].size), hipMemcpyHostToDevice); temp12.size = c[1].size; temp12.value = tempvalue[2*i+1]; hipMemcpy((devc + 2*i+1), &temp12, (sizeof(big)), hipMemcpyHostToDevice); big temp2; hipMalloc((void**)&tempvalue2[i], (sizeof(uint) * p->size * 2)); temp2.value = tempvalue2[i]; hipMemcpy((devres + i), &temp2, (sizeof(big)), hipMemcpyHostToDevice); big temp3; hipMalloc((void**)&tempvalue3[i], (sizeof(uint) * p->size * 2)); temp3.value = tempvalue3[i]; hipMemcpy((minbuff + i), &temp3, (sizeof(big)), hipMemcpyHostToDevice); big temp4; hipMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; hipMemcpy((mulbuff + i), &temp4, (sizeof(big)), hipMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // hipMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); kerneldek << <dimensigrid, dimensiblok >> >(devc, deve, devp, devres); cudaStatus = hipGetLastError(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != hipSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } else { // printf("Success\n"); } hipDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } hipMemcpy(tempres, devres, (sizeof(big) * banyakdata), hipMemcpyDeviceToHost); for (int i = 0; i < banyakdata; i++){ res[i].size = tempres[i].size; hipMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, hipMemcpyDeviceToHost); } hipFree(darrp); hipFree(darre); hipFree(devp); hipFree(deve); for (int i = 0; i < 2 * banyakdata; i++) { hipFree(tempvalue[i]); } for (int i = 0; i < banyakdata; i++) { hipFree(tempvalue2[i]); hipFree(tempvalue3[i]); hipFree(tempvalue4[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3); free(tempvalue4); hipFree(devc); hipFree(devres); hipFree(minbuff); hipFree(mulbuff); free(tempres); return cudaStatus; } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); //========================================================// hipError_t cudaStatus = CUDAenk(m, k, g, p, y, res); if (cudaStatus != hipSuccess) { fprintf(stderr, "\nenkripsiCUDA failed!"); } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); } for (int i = 0; i < 5; i++) { printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); } printf("Cipher ... 
: ...\n"); printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c,big* e,big* p,big* res2){ printf("Decrypting...\n"); //========================================================// hipError_t cudaStatus = CUDAdek(c, e, p, res2); if (cudaStatus != hipSuccess) { fprintf(stderr, "\ndekripsiCUDA failed!"); } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); } for (int i = 0; i < 5; i++) { printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); } printf("Plain ... : ...\n"); printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 4; p->value = (uint*) malloc(p->size * sizeof(uint)); for (int i = 0; i < p->size; i++) { // p->value[i] = 2357; p->value[i] = UINT_MAX-82; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 4; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = UINT_MAX-902; } // Kunci privat x x->size = 4; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = UINT_MAX-86262; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); printf("y 0 : %u\n", y->value[0]); printf("y 0 : %u\n", y->value[1]); //========================================================// // Blok plainteks m->size = 4; m->value = (uint*) malloc(m->size * sizeof(uint)); for (int i = 0; i < m->size; i++) { // m->value[i] = 1001; m->value[i] = UINT_MAX-5522; } // Nilai k masing-masing blok k->size = 4; k->value = (uint*) malloc(k->size * sizeof(uint)); for (int i = 0; i < k->size; i++) { // k->value[i] = 77; k->value[i] = UINT_MAX-38227; } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size); } } int main(){ big *p, *g, *x, *e, *y, *m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = (big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); free(p->value); 
free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = (ul) res->value[i+j] + (ul) aval * bval + lebih; res->value[i+j] = (uint) temp; /* low 32 bits of the partial sum */ lebih = (uint) (temp >> 32); /* carry into the next limb */ } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb + 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && modres->value[modres->size - 1] == 0) 
modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
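The multi-precision helpers above store numbers little-endian in 32-bit limbs, so the natural radix is 2^32. Two details in kali() work against that: the carry is split with "% UINT_MAX" and "/ UINT_MAX" (UINT_MAX is 2^32 - 1, not 2^32), and the product aval * bval is formed in 32-bit arithmetic before being widened to ul, so its high word is lost; the inner "for (char j = 0, lebih = 0; ...)" also re-declares lebih as a char, shadowing the 32-bit carry declared just above it. Below is a corrected sketch of the inner multiply-accumulate step only, assuming the same limb layout; it is an illustration, not a drop-in patch for the routine.

// Sketch of base-2^32 carry handling for one partial product in kali().
// Assumes the same little-endian 32-bit limb layout used by `big` above.
#include <cstdint>

static inline void mul_acc_limb(uint32_t* resval, int i, int j,
                                uint32_t aval, uint32_t bval, uint32_t& lebih)
{
    // Widen BEFORE multiplying so the high 32 bits of aval*bval survive.
    uint64_t temp = (uint64_t)aval * bval + resval[i + j] + lebih;
    resval[i + j] = (uint32_t)(temp & 0xFFFFFFFFu);  // temp mod 2^32, not temp % UINT_MAX
    lebih         = (uint32_t)(temp >> 32);          // temp / 2^32,  not temp / UINT_MAX
}

Keeping the radix at 2^32 also stays consistent with getShiftedBlock(), which already shifts on 32-bit block boundaries.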
b3802388f5fbd0d5fa85586fac4b2a89ef3c2976.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 1024; int dimensigrid = 8; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); cudaError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); cudaError_t CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; uint* minbuff2 = (uint*) malloc(sizeof(uint) * c->size); short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff2); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff2); } } free(minbuff2); // printf("res adlaah %u\n", res->value[0]); } __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // printf("res adalah %u\n", res->value[0]); // // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); // printf("res val 0 adalah %p\n", &(res->value[0])); // printf("res val 1 adalah %p\n", &(res->value[1])); // printf("res val 2 adalah %p\n", &(res->value[2])); // printf("res val 3 adalah %p\n", &(res->value[3])); // printf("res 1 val 0 adalah %p\n", &((res+1)->value[0])); // printf("res 1 val 1 adalah %p\n", &((res+1)->value[1])); // printf("res 1 val 2 adalah %p\n", &((res+1)->value[2])); // printf("res 1 val 3 adalah %p\n", &((res+1)->value[3])); // printf("res val 0 adalah %u\n", res->value[0]); // printf("res 1 val 0 adalah %u\n", (res+1)->value[0]); } __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); // 
printf("c1 adlaah %u\n", c1->value[0]); // printf("c2 adlaah %u\n", c2->value[0]); } __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int jdx = threadIdx.x; __shared__ big sm[128]; __shared__ big sk[128]; __shared__ big sres[256]; __shared__ big sp; __shared__ big sg; __shared__ big sy; __shared__ uint s[3200]; uint *sresval = s; uint *spval = (uint*)&sresval[8*128*2]; uint *sgval = (uint*)&spval[4]; uint *syval = (uint*)&sgval[4]; uint *smval = (uint*)&syval[4]; uint *skval = (uint*)&smval[4*128]; sm[jdx].size = m[idx].size; sk[jdx].size = k[idx].size; sp.size = p[0].size; sg.size = g[0].size; sy.size = y[0].size; for (int i = 0; i < 4; i++) { smval[jdx*4+i] = m[idx].value[i]; skval[jdx*4+i] = k[idx].value[i]; spval[i] = p[0].value[i]; sgval[i] = g[0].value[i]; syval[i] = y[0].value[i]; } sm[jdx].value = (uint*)&smval[jdx*4]; sk[jdx].value = (uint*)&skval[jdx*4]; sres[2*jdx].value = (uint*)&sresval[jdx*8*2]; sres[2*jdx+1].value = (uint*)&sresval[jdx*8*2+8]; sp.value = spval; sg.value = sgval; sy.value = syval; __syncthreads(); // if(idx < 10){ // // printf("pointer2 di %d = %p \n", 2*jdx,sres[2*jdx].value); // // printf("pointer2 di %d = %p \n", 2*jdx+1,sres[2*jdx+1].value); // // printf("sresval pointer di %d = %p \n", jdx,sresval + jdx); // // printf("pointer big di %d = %p \n", 2*jdx,sres+2*jdx); // // printf("pointer big2 di %d = %p \n", 2*jdx+1,sres+2*jdx+1); big* minbuff = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); minbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); mulbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, minbuff, mulbuff); // } if(idx == 0){ printf("Semangkas\n"); printf("xx[0].size = %u\n", sgval[0]); printf("xx[0].size = %u\n", g[0].value[0]); printf("size = %u\n", m[0].size); printf("sm[0].val 0 = %u\n", y[0].value[1]); printf("Pointer 1 = %p\n", &(sm[0].value[0])); printf("Pointer 2 = %p\n", &(sm+0)->value[0]); big* poinm = sm; printf("Pointer 3 = %p\n", poinm); printf("Pointer 4 = %p\n", &poinm->value[0]); printf("sres 0 adalah %d\n", sres[2].size); printf("sres 0 adalah %d\n", sres[2].value[0]); } // printf("sres %d adalah %d\n", 2*idx, sres[2*jdx].size); // printf("sres %d adalah %d\n", 2*idx+1, sres[2*jdx+1].size); res[2*idx].size = sres[2*jdx].size; res[2*idx+1].size = sres[2*jdx+1].size; for (int i = 0; i < sres[2*jdx].size; i++) { res[2*idx].value[i] = sres[2*jdx].value[i]; } for (int i = 0; i < sres[2*jdx+1].size; i++) { res[2*idx+1].value[i] = sres[2*jdx+1].value[i]; } } __global__ void kerneldek(big *c, big *e, big *p, big *res){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int jdx = threadIdx.x; __shared__ big sa[128]; __shared__ big sb[128]; __shared__ big sres[128]; __shared__ big sp; __shared__ big se; __shared__ uint s[2100]; uint *sresval = s; uint *spval = (uint*)&sresval[8*128]; uint *saval = (uint*)&spval[4]; uint *sbval = (uint*)&saval[4*128]; uint *seval = (uint*)&sbval[4*128]; sa[jdx].size = c[2*idx].size; sb[jdx].size = c[2*idx+1].size; sp.size = p[0].size; se.size = e[0].size; for (int i = 0; i < 4; i++) { saval[jdx*4+i] = c[2*idx].value[i]; sbval[jdx*4+i] = c[2*idx+1].value[i]; spval[i] = p[0].value[i]; seval[i] = e[0].value[i]; } sa[jdx].value = (uint*)&saval[jdx*4]; sb[jdx].value = (uint*)&sbval[jdx*4]; sres[jdx].value = (uint*)&sresval[jdx*8]; sp.value = spval; se.value = seval; __syncthreads(); big* minbuff = (big*) malloc(sizeof(big)); big* mulbuff = 
(big*) malloc(sizeof(big)); minbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); mulbuff->value = (uint*) malloc(sizeof(uint) * sp.size * 2); dekripsi(sa+jdx, sb+jdx, &se, &sp, sres+jdx, minbuff, mulbuff); res[idx].size = sres[jdx].size; for (int i = 0; i < sres[jdx].size; i++) { res[idx].value[i] = sres[jdx].value[i]; } } cudaError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { cudaError_t cudaStatus; cudaSetDevice(0); //=====================BAGIAN G, P, DAN Y ====================================// big *devg, *devp, *devy; cudaMalloc((void**)&devg, sizeof(big)); cudaMalloc((void**)&devp, sizeof(big)); cudaMalloc((void**)&devy, sizeof(big)); uint *darrg, *darrp, *darry; cudaMalloc((void**)&darrg, g->size * sizeof(uint)); cudaMalloc((void**)&darrp, p->size * sizeof(uint)); cudaMalloc((void**)&darry, y->size * sizeof(uint)); big tempg; cudaMemcpy(darrg, g->value, (sizeof(uint) * g->size), cudaMemcpyHostToDevice); tempg.size = g->size; tempg.value = darrg; cudaMemcpy((devg), &tempg, (sizeof(big)), cudaMemcpyHostToDevice); big tempp; cudaMemcpy(darrp, p->value, (sizeof(uint) * p->size), cudaMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; cudaMemcpy((devp), &tempp, (sizeof(big)), cudaMemcpyHostToDevice); big tempy; cudaMemcpy(darry, y->value, (sizeof(uint) * y->size), cudaMemcpyHostToDevice); tempy.size = y->size; tempy.value = darry; cudaMemcpy((devy), &tempy, (sizeof(big)), cudaMemcpyHostToDevice); //=====================BAGIAN M[] DAN K[] ====================================// big *devm, *devk, *devres, *minbuff, *mulbuff; cudaMalloc((void**)&devm, banyakdata * sizeof(big)); cudaMalloc((void**)&devk, banyakdata * sizeof(big)); cudaMalloc((void**)&devres, banyakdata * 2 *sizeof(big)); cudaMalloc((void**)&minbuff, banyakdata * sizeof(big)); cudaMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3a = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3b = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue5 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp; cudaMalloc((void**)&tempvalue[i], (sizeof(uint) * m[0].size)); cudaMemcpy(tempvalue[i], m[0].value, (sizeof(uint) * m[0].size), cudaMemcpyHostToDevice); temp.size = m[0].size; temp.value = tempvalue[i]; cudaMemcpy((devm + i), &temp, (sizeof(big)), cudaMemcpyHostToDevice); big temp2; cudaMalloc((void**)&tempvalue2[i], (sizeof(uint) * k[0].size)); cudaMemcpy(tempvalue2[i], k[0].value, (sizeof(uint) * k[0].size), cudaMemcpyHostToDevice); temp2.size = k[0].size; temp2.value = tempvalue2[i]; cudaMemcpy((devk + i), &temp2, (sizeof(big)), cudaMemcpyHostToDevice); big temp3a; cudaMalloc((void**)&tempvalue3a[i], (sizeof(uint) * p->size * 2)); temp3a.value = tempvalue3a[i]; cudaMemcpy((devres + 2 * i), &temp3a, (sizeof(big)), cudaMemcpyHostToDevice); big temp3b; cudaMalloc((void**)&tempvalue3b[i], (sizeof(uint) * p->size * 2)); temp3b.value = tempvalue3b[i]; cudaMemcpy((devres + 2 * i + 1), &temp3b, (sizeof(big)), cudaMemcpyHostToDevice); big temp4; cudaMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; cudaMemcpy((minbuff + i), &temp4, (sizeof(big)), cudaMemcpyHostToDevice); big temp5; cudaMalloc((void**)&tempvalue5[i], (sizeof(uint) * p->size * 2)); temp5.value = tempvalue5[i]; 
cudaMemcpy((mulbuff + i), &temp5, (sizeof(big)), cudaMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // cudaMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); kernelenk << <dimensigrid, dimensiblok >> >(devm, devk, devg, devp, devy, devres); cudaStatus = cudaGetLastError(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } else { // printf("Success\n"); } cudaDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * 2 * sizeof(big)); for (int i = 0; i < banyakdata*2; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } cudaMemcpy(tempres, devres, (sizeof(big) * 2 * banyakdata), cudaMemcpyDeviceToHost); for (int i = 0; i < banyakdata*2; i++){ res[i].size = tempres[i].size; cudaMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, cudaMemcpyDeviceToHost); } cudaFree(darrg); cudaFree(darrp); cudaFree(darry); cudaFree(devg); cudaFree(devp); cudaFree(devy); for (int i = 0; i < banyakdata; i++) { cudaFree(tempvalue[i]); cudaFree(tempvalue2[i]); cudaFree(tempvalue3a[i]); cudaFree(tempvalue3b[i]); cudaFree(tempvalue4[i]); cudaFree(tempvalue5[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3a); free(tempvalue3b); free(tempvalue4); free(tempvalue5); cudaFree(devm); cudaFree(devk); cudaFree(devres); cudaFree(minbuff); cudaFree(mulbuff); free(tempres); //cudaProfilerStop(); //free(med); return cudaStatus; } cudaError_t CUDAdek(big *c, big *e, big* p, big *res) { cudaError_t cudaStatus; cudaSetDevice(0); //=====================BAGIAN p dan e ( eksponen) ====================================// big *devp, *deve; cudaMalloc((void**)&devp, sizeof(big)); cudaMalloc((void**)&deve, sizeof(big)); uint *darrp, *darre; cudaMalloc((void**)&darrp, p->size * sizeof(uint)); cudaMalloc((void**)&darre, e->size * sizeof(uint)); big tempp; cudaMemcpy(darrp, p->value, (sizeof(uint) * p->size), cudaMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; cudaMemcpy((devp), &tempp, (sizeof(big)), cudaMemcpyHostToDevice); big tempe; cudaMemcpy(darre, e->value, (sizeof(uint) * e->size), cudaMemcpyHostToDevice); tempe.size = e->size; tempe.value = darre; cudaMemcpy((deve), &tempe, (sizeof(big)), cudaMemcpyHostToDevice); //======================================BAGIAN C[] ====================================// big *devc, *devres, *minbuff, *mulbuff; cudaMalloc((void**)&devc, banyakdata * 2 * sizeof(big)); cudaMalloc((void**)&devres, banyakdata * sizeof(big)); cudaMalloc((void**)&minbuff, banyakdata * sizeof(big)); cudaMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata*2); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp11; cudaMalloc((void**)&tempvalue[2*i], (sizeof(uint) * 
c[0].size)); cudaMemcpy(tempvalue[2*i], c[0].value, (sizeof(uint) * c[0].size), cudaMemcpyHostToDevice); temp11.size = c[0].size; temp11.value = tempvalue[2*i]; cudaMemcpy((devc + 2*i), &temp11, (sizeof(big)), cudaMemcpyHostToDevice); big temp12; cudaMalloc((void**)&tempvalue[2*i+1], (sizeof(uint) * c[1].size)); cudaMemcpy(tempvalue[2*i+1], c[1].value, (sizeof(uint) * c[1].size), cudaMemcpyHostToDevice); temp12.size = c[1].size; temp12.value = tempvalue[2*i+1]; cudaMemcpy((devc + 2*i+1), &temp12, (sizeof(big)), cudaMemcpyHostToDevice); big temp2; cudaMalloc((void**)&tempvalue2[i], (sizeof(uint) * p->size * 2)); temp2.value = tempvalue2[i]; cudaMemcpy((devres + i), &temp2, (sizeof(big)), cudaMemcpyHostToDevice); big temp3; cudaMalloc((void**)&tempvalue3[i], (sizeof(uint) * p->size * 2)); temp3.value = tempvalue3[i]; cudaMemcpy((minbuff + i), &temp3, (sizeof(big)), cudaMemcpyHostToDevice); big temp4; cudaMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; cudaMemcpy((mulbuff + i), &temp4, (sizeof(big)), cudaMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // cudaMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); kerneldek << <dimensigrid, dimensiblok >> >(devc, deve, devp, devres); cudaStatus = cudaGetLastError(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } else { // printf("Success\n"); } cudaDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } cudaMemcpy(tempres, devres, (sizeof(big) * banyakdata), cudaMemcpyDeviceToHost); for (int i = 0; i < banyakdata; i++){ res[i].size = tempres[i].size; cudaMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, cudaMemcpyDeviceToHost); } cudaFree(darrp); cudaFree(darre); cudaFree(devp); cudaFree(deve); for (int i = 0; i < 2 * banyakdata; i++) { cudaFree(tempvalue[i]); } for (int i = 0; i < banyakdata; i++) { cudaFree(tempvalue2[i]); cudaFree(tempvalue3[i]); cudaFree(tempvalue4[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3); free(tempvalue4); cudaFree(devc); cudaFree(devres); cudaFree(minbuff); cudaFree(mulbuff); free(tempres); return cudaStatus; } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); //========================================================// cudaError_t cudaStatus = CUDAenk(m, k, g, p, y, res); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\nenkripsiCUDA failed!"); } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); } for (int i = 0; i < 5; i++) { printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); } printf("Cipher ... 
: ...\n"); printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c,big* e,big* p,big* res2){ printf("Decrypting...\n"); //========================================================// cudaError_t cudaStatus = CUDAdek(c, e, p, res2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\ndekripsiCUDA failed!"); } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); } for (int i = 0; i < 5; i++) { printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); } printf("Plain ... : ...\n"); printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 4; p->value = (uint*) malloc(p->size * sizeof(uint)); for (int i = 0; i < p->size; i++) { // p->value[i] = 2357; p->value[i] = UINT_MAX-82; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 4; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = UINT_MAX-902; } // Kunci privat x x->size = 4; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = UINT_MAX-86262; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); printf("y 0 : %u\n", y->value[0]); printf("y 0 : %u\n", y->value[1]); //========================================================// // Blok plainteks m->size = 4; m->value = (uint*) malloc(m->size * sizeof(uint)); for (int i = 0; i < m->size; i++) { // m->value[i] = 1001; m->value[i] = UINT_MAX-5522; } // Nilai k masing-masing blok k->size = 4; k->value = (uint*) malloc(k->size * sizeof(uint)); for (int i = 0; i < k->size; i++) { // k->value[i] = 77; k->value[i] = UINT_MAX-38227; } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size); } } int main(){ big *p, *g, *x, *e, *y, *m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = (big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); 
free(p->value); free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0, lebih = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = res->value[i+j] + aval * bval + lebih; res->value[i+j] = temp % UINT_MAX; lebih = temp / UINT_MAX; } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb + 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && 
modres->value[modres->size - 1] == 0) modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
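Taken together, kernelenk and kerneldek implement textbook ElGamal over this multi-precision type: encryption produces c1 = g^k mod p and c2 = m*y^k mod p, and decryption recovers m = c2 * c1^e mod p with e = p - x - 1 (computed in init via tambah/kurang), because c1^(p-1-x) cancels y^k by Fermat's little theorem when p is prime. The tiny host-side round trip below checks the same algebra with 64-bit integers; the small parameters are the ones left commented out in init (2357, 2, 1751, 1001, 77), not the UINT_MAX-based values actually used, and the 64-bit arithmetic only works because the toy modulus is tiny.

// Toy ElGamal round trip; a sketch of the scheme the kernels implement,
// not of the limbed big-number code itself.
#include <cstdint>
#include <cstdio>

static uint64_t modexp64(uint64_t a, uint64_t b, uint64_t m) {
    uint64_t r = 1; a %= m;
    while (b) {                      // right-to-left square-and-multiply
        if (b & 1) r = (r * a) % m;  // products fit in 64 bits for this tiny m
        a = (a * a) % m;
        b >>= 1;
    }
    return r;
}

int main() {
    uint64_t p = 2357, g = 2, x = 1751;       // private key x
    uint64_t y = modexp64(g, x, p);           // public key y = g^x mod p
    uint64_t m = 1001, k = 77;                // message block and ephemeral k
    uint64_t c1 = modexp64(g, k, p);          // first cipher block
    uint64_t c2 = (m * modexp64(y, k, p)) % p;
    uint64_t e  = p - x - 1;                  // decryption exponent
    uint64_t d  = (c2 * modexp64(c1, e, p)) % p;  // recovers m
    printf("m=%llu decrypted=%llu\n", (unsigned long long)m, (unsigned long long)d);
    return 0;
}

The device modexp above scans the exponent bits from the most significant end instead; both orders compute the same value.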
0233d4af0c75d481ceea5b518f0cefc3217e1760.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "omp.h"
#include <hip/hip_runtime.h>
#include <fstream>
#include <cstdlib>
#include "mainJobA_godunov.cpp"
#include "helperJobB_kernel.cu"

int main(int argc, char *argv[]) {
    // software backbone/infrastructure that allows mainJobA to carry on while helperJobB works asynchronously to update
    // the computationally costly ingredient needed by mainJobA
    // mainJobA runs on one node, helperJobB runs on another node; updates via MPI
    int L;
    float g, dx, dt, IM;
    if (argc < 2) { // N is required; the remaining constants are optional
        std::cerr << "Usage: " << argv[0] << " N [L g dx dt IM]\n";
        return -1;
    }
    const auto num_threads = atoi(argv[1]);
    omp_set_num_threads(num_threads);
    // get values for constants (defaults when not given on the command line)
    if (argc < 3) { L = 1; } else { L = atoi(argv[2]); }
    if (argc < 4) { g = 1.4f; } else { g = atof(argv[3]); }          // atof: g, dx, dt, IM are floating point
    if (argc < 5) { dx = 0.01f; } else { dx = atof(argv[4]); }
    if (argc < 6) { dt = 1.0f / 2000; } else { dt = atof(argv[5]); } // 1/2000 in integer arithmetic would be 0
    if (argc < 7) { IM = L / dx; } else { IM = atof(argv[6]); }
    double start = omp_get_wtime();
    #pragma omp parallel shared(L, g, dx, dt, IM)
    {
        mainJobA_godunov(L, g, dx, dt, IM);
    }
    double stop = omp_get_wtime();
    double time = (stop - start) * 1000;
    std::cout << num_threads << "\n" << time << "\n";
}
0233d4af0c75d481ceea5b518f0cefc3217e1760.cu
#include <iostream> #include "omp.h" #include <cuda.h> #include <fstream> #include "mainJobA_godunov.cpp" #include "helperJobB_kernel.cu" int main(int argc, char *argv[]) { //software backbone/infrastructure that allows mainJobA to carry on while helperJobB works asynchronously to update //the computationally costly ingredient needed by mainJobA //mainJobA runs on one node, helperJobB runs on another node; updates via MPI int L; float g, dx, dt, IM; if (argc != 2) { std::cerr << "Usage: " << argv[0] << " N\n"; return -1; } const auto num_threads = atoi(argv[1]); omp_set_num_threads(num_threads); // get values for constants if (argc != 3) { L = 1; }else{ L = atoi(argv[2]); } if (argc != 4) { g = 1.4; }else{ g = atoi(argv[3]); } if (argc != 5) { dx = 0.01; }else{ dx = atoi(argv[4]); } if (argc != 6) { dt = 1/2000; }else{ dt = atoi(argv[5]); } if (argc != 7) { IM = L/dx; }else{ IM = atoi(argv[6]); } double start = omp_get_wtime(); #pragma omp parallel shared (L,g,dx,dt,IM) mainJobA_godunov(L,g,dx,dt,IM); #pragma omp end parallel double stop = omp_get_wtime(); double time = (stop - start) * 1000; std::cout << num_threads << "\n" << time << "\n"; }
672095be50e3f07eaa998f9b903b95a4d351b292.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "timer.h" #include <iostream> #include <fstream> using namespace std; //gigabyte = 1 << 30; #if defined(_DEBUG) #define GIGA 1 << 20 #else #define GIGA 1 << 30 #endif const char* filepath = "C:/Users/educ/Documents/enwiki-latest-abstract.xml"; constexpr size_t BMSize = GIGA / 8; int main(int argc, char* argv[]) { hptimer open; double openTime; char* fileBuffer = nullptr; open.TimeSinceLastCall(); ifstream bigFile(filepath); try { if (argc != 2) { } if (!bigFile.is_open()) { throw("Failed to Open File"); } fileBuffer = new char[GIGA](); bigFile.read(fileBuffer, GIGA); if (!bigFile) { throw("Failed to Read File"); } openTime = open.TimeSinceLastCall(); for (int i = 0; i < 100; i++) { cout << fileBuffer[i]; } cout << endl << "It took " << openTime << " seconds to open and read from the file" << endl; } catch (char* err) { cout << err << endl; } if (fileBuffer != nullptr) { delete[] fileBuffer; } if (bigFile.is_open()) { bigFile.close(); } system("pause"); return 0; }
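One portability note on the reader above: throw("Failed to Open File") throws a const char*, and a handler written catch (char* err) is not a match under standard C++ (the conversion would have to drop const), so on a conforming compiler the exception escapes the try block and std::terminate is called. The sketch below shows the same control flow with a handler that does match; the tiny buffer is only for illustration (the original allocates a GIGA-sized buffer and reads the Wikipedia abstract dump).

// Sketch: string-literal throws are const char*; catch them as const char*,
// or more idiomatically throw std::runtime_error and catch by const reference.
#include <fstream>
#include <iostream>
#include <stdexcept>

static void read_chunk(const char* path, char* buf, std::streamsize n) {
    std::ifstream f(path, std::ios::binary);
    if (!f.is_open()) throw std::runtime_error("Failed to Open File");
    f.read(buf, n);
    if (!f) throw std::runtime_error("Failed to Read File");
}

int main() {
    char buf[16] = {};  // illustration only; the original uses a GIGA-byte heap buffer
    try {
        read_chunk("C:/Users/educ/Documents/enwiki-latest-abstract.xml", buf, sizeof(buf));
        std::cout << buf[0] << std::endl;
    } catch (const std::exception& e) {  // matching handler, caught by const reference
        std::cout << e.what() << std::endl;
    }
    return 0;
}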
672095be50e3f07eaa998f9b903b95a4d351b292.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "timer.h" #include <iostream> #include <fstream> using namespace std; //gigabyte = 1 << 30; #if defined(_DEBUG) #define GIGA 1 << 20 #else #define GIGA 1 << 30 #endif const char* filepath = "C:/Users/educ/Documents/enwiki-latest-abstract.xml"; constexpr size_t BMSize = GIGA / 8; int main(int argc, char* argv[]) { hptimer open; double openTime; char* fileBuffer = nullptr; open.TimeSinceLastCall(); ifstream bigFile(filepath); try { if (argc != 2) { } if (!bigFile.is_open()) { throw("Failed to Open File"); } fileBuffer = new char[GIGA](); bigFile.read(fileBuffer, GIGA); if (!bigFile) { throw("Failed to Read File"); } openTime = open.TimeSinceLastCall(); for (int i = 0; i < 100; i++) { cout << fileBuffer[i]; } cout << endl << "It took " << openTime << " seconds to open and read from the file" << endl; } catch (char* err) { cout << err << endl; } if (fileBuffer != nullptr) { delete[] fileBuffer; } if (bigFile.is_open()) { bigFile.close(); } system("pause"); return 0; }
2bea8697230b4df053e26dc1fa4cc64d30e90c80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/cusparseHelper.h" #include "saiga/cuda/dot.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" #include "saiga/core/time/timer.h" #include <thrust/inner_product.h> namespace Saiga { namespace CUDA { // nvcc $CPPFLAGS -ptx -src-in-ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr // integrate_test.cu void dotTest() { CUDA_SYNC_CHECK_ERROR(); #ifdef SAIGA_USE_CUBLAS initBLASSPARSE(); #endif { using elementType = float; int N = 100 * 1000 * 1000; size_t readWrites = N * 2 * sizeof(elementType); CUDA::PerformanceTestHelper pth("Dot Product <float>", readWrites); thrust::device_vector<elementType> v1(N, 1); thrust::device_vector<elementType> v2(N, 2); thrust::host_vector<elementType> h1 = v1; thrust::host_vector<elementType> h2 = v2; elementType ref = 0; { float time; { ScopedTimer<float> t(&time); for (int i = 0; i < N; ++i) { ref += h1[i] * h2[i]; } } SAIGA_ASSERT(ref > 0); pth.addMeassurement("CPU dot", time); } { float time; elementType sum; { CUDA::CudaScopedTimer t(time); sum = thrust::inner_product(v1.begin(), v1.end(), v2.begin(), 0); } pth.addMeassurement("thrust::inner_product", time); ref = sum; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); } #ifdef SAIGA_USE_CUBLAS { thrust::device_vector<elementType> d_res(1, 0); // make sure no additional memcpy is issued hipblasSetPointerMode(cublashandle, HIPBLAS_POINTER_MODE_DEVICE); float time; elementType sum; { CUDA::CudaScopedTimer t(time); hipblasSdot(cublashandle, N, thrust::raw_pointer_cast(v1.data()), 1, thrust::raw_pointer_cast(v2.data()), 1, thrust::raw_pointer_cast(d_res.data())); } sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); pth.addMeassurement("hipblasSdot", time); } #endif { thrust::device_vector<elementType> d_res(1, 0); float time; { const int blockSize = 256; SAIGA_ASSERT(0); // static auto numBlocks = max_active_blocks(dot<elementType,blockSize>,blockSize,0); CUDA::CudaScopedTimer t2(time); hipLaunchKernelGGL(( dot<elementType, blockSize>), dim3(1), dim3(blockSize), 0, 0, v1, v2, thrust::raw_pointer_cast(d_res.data())); } elementType sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); time = time; pth.addMeassurement("my dot product", time); } } { using elementType = double; int N = 50 * 1000 * 1000; size_t readWrites = N * 2 * sizeof(elementType); CUDA::PerformanceTestHelper pth("Dot Product <double>", readWrites); thrust::device_vector<elementType> v1(N, 1); thrust::device_vector<elementType> v2(N, 2); thrust::host_vector<elementType> h1 = v1; thrust::host_vector<elementType> h2 = v2; elementType ref = 0; { float time; { ScopedTimer<float> t(&time); for (int i = 0; i < N; ++i) { ref += h1[i] * h2[i]; } } SAIGA_ASSERT(ref > 0); pth.addMeassurement("CPU dot", time); } { float time; elementType sum; { CUDA::CudaScopedTimer t(time); sum = thrust::inner_product(v1.begin(), v1.end(), v2.begin(), 0); } pth.addMeassurement("thrust::inner_product", time); ref = sum; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); } #ifdef SAIGA_USE_CUBLAS { thrust::device_vector<elementType> d_res(1, 0); // make sure no additional memcpy is issued hipblasSetPointerMode(cublashandle, HIPBLAS_POINTER_MODE_DEVICE); float time; elementType sum; 
{ CUDA::CudaScopedTimer t(time); hipblasDdot(cublashandle, N, thrust::raw_pointer_cast(v1.data()), 1, thrust::raw_pointer_cast(v2.data()), 1, thrust::raw_pointer_cast(d_res.data())); } sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); pth.addMeassurement("hipblasSdot", time); } #endif { thrust::device_vector<elementType> d_res(1, 0); float time; { const int blockSize = 256; SAIGA_ASSERT(0); // static auto numBlocks = max_active_blocks(dot<elementType,blockSize>,blockSize,0); CUDA::CudaScopedTimer t2(time); hipLaunchKernelGGL(( dot<elementType, blockSize>), dim3(1), dim3(blockSize), 0, 0, v1, v2, thrust::raw_pointer_cast(d_res.data())); } elementType sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); time = time; pth.addMeassurement("my dot product", time); } } CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
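Both timing branches launch the dot<elementType, blockSize> kernel with a single block of 256 threads over the full vector (N = 10^8 floats or 5*10^7 doubles), so whatever saiga/cuda/dot.h does internally, each thread has to stride across the whole array and the block has to reduce the partial sums. That header is not part of this file; the following is only a generic sketch of the single-block pattern with raw pointers, not Saiga's actual implementation.

// Generic single-block dot-product sketch (NOT saiga/cuda/dot.h): each thread
// accumulates a grid-stride partial sum, then the block tree-reduces in shared memory.
// Assumes BLOCK_SIZE is a power of two (256 in the test above).
template <typename T, int BLOCK_SIZE>
__global__ void dot_sketch(const T* a, const T* b, int n, T* result)
{
    __shared__ T partial[BLOCK_SIZE];
    T sum = T(0);
    for (int i = threadIdx.x; i < n; i += BLOCK_SIZE)  // gridDim.x == 1 here
        sum += a[i] * b[i];
    partial[threadIdx.x] = sum;
    __syncthreads();

    for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {     // in-block tree reduction
        if (threadIdx.x < s) partial[threadIdx.x] += partial[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) *result = partial[0];
}

// launch, mirroring the test:
//   dot_sketch<float, 256><<<1, 256>>>(raw_a, raw_b, n, thrust::raw_pointer_cast(d_res.data()));

A single block cannot occupy the whole GPU; for large N the usual next step is a multi-block kernel that combines per-block results with atomics or a second reduction pass.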
2bea8697230b4df053e26dc1fa4cc64d30e90c80.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/cusparseHelper.h" #include "saiga/cuda/dot.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" #include "saiga/core/time/timer.h" #include <thrust/inner_product.h> namespace Saiga { namespace CUDA { // nvcc $CPPFLAGS -ptx -src-in-ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr // integrate_test.cu void dotTest() { CUDA_SYNC_CHECK_ERROR(); #ifdef SAIGA_USE_CUBLAS initBLASSPARSE(); #endif { using elementType = float; int N = 100 * 1000 * 1000; size_t readWrites = N * 2 * sizeof(elementType); CUDA::PerformanceTestHelper pth("Dot Product <float>", readWrites); thrust::device_vector<elementType> v1(N, 1); thrust::device_vector<elementType> v2(N, 2); thrust::host_vector<elementType> h1 = v1; thrust::host_vector<elementType> h2 = v2; elementType ref = 0; { float time; { ScopedTimer<float> t(&time); for (int i = 0; i < N; ++i) { ref += h1[i] * h2[i]; } } SAIGA_ASSERT(ref > 0); pth.addMeassurement("CPU dot", time); } { float time; elementType sum; { CUDA::CudaScopedTimer t(time); sum = thrust::inner_product(v1.begin(), v1.end(), v2.begin(), 0); } pth.addMeassurement("thrust::inner_product", time); ref = sum; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); } #ifdef SAIGA_USE_CUBLAS { thrust::device_vector<elementType> d_res(1, 0); // make sure no additional memcpy is issued cublasSetPointerMode(cublashandle, CUBLAS_POINTER_MODE_DEVICE); float time; elementType sum; { CUDA::CudaScopedTimer t(time); cublasSdot(cublashandle, N, thrust::raw_pointer_cast(v1.data()), 1, thrust::raw_pointer_cast(v2.data()), 1, thrust::raw_pointer_cast(d_res.data())); } sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); pth.addMeassurement("cublasSdot", time); } #endif { thrust::device_vector<elementType> d_res(1, 0); float time; { const int blockSize = 256; SAIGA_ASSERT(0); // static auto numBlocks = max_active_blocks(dot<elementType,blockSize>,blockSize,0); CUDA::CudaScopedTimer t2(time); dot<elementType, blockSize><<<1, blockSize>>>(v1, v2, thrust::raw_pointer_cast(d_res.data())); } elementType sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); time = time; pth.addMeassurement("my dot product", time); } } { using elementType = double; int N = 50 * 1000 * 1000; size_t readWrites = N * 2 * sizeof(elementType); CUDA::PerformanceTestHelper pth("Dot Product <double>", readWrites); thrust::device_vector<elementType> v1(N, 1); thrust::device_vector<elementType> v2(N, 2); thrust::host_vector<elementType> h1 = v1; thrust::host_vector<elementType> h2 = v2; elementType ref = 0; { float time; { ScopedTimer<float> t(&time); for (int i = 0; i < N; ++i) { ref += h1[i] * h2[i]; } } SAIGA_ASSERT(ref > 0); pth.addMeassurement("CPU dot", time); } { float time; elementType sum; { CUDA::CudaScopedTimer t(time); sum = thrust::inner_product(v1.begin(), v1.end(), v2.begin(), 0); } pth.addMeassurement("thrust::inner_product", time); ref = sum; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); } #ifdef SAIGA_USE_CUBLAS { thrust::device_vector<elementType> d_res(1, 0); // make sure no additional memcpy is issued cublasSetPointerMode(cublashandle, CUBLAS_POINTER_MODE_DEVICE); float time; elementType sum; { CUDA::CudaScopedTimer t(time); cublasDdot(cublashandle, N, thrust::raw_pointer_cast(v1.data()), 1, 
thrust::raw_pointer_cast(v2.data()), 1, thrust::raw_pointer_cast(d_res.data())); } sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); pth.addMeassurement("cublasSdot", time); } #endif { thrust::device_vector<elementType> d_res(1, 0); float time; { const int blockSize = 256; SAIGA_ASSERT(0); // static auto numBlocks = max_active_blocks(dot<elementType,blockSize>,blockSize,0); CUDA::CudaScopedTimer t2(time); dot<elementType, blockSize><<<1, blockSize>>>(v1, v2, thrust::raw_pointer_cast(d_res.data())); } elementType sum = d_res[0]; SAIGA_ASSERT(sum >= ref - 0.1f && sum <= ref + 0.1f); time = time; pth.addMeassurement("my dot product", time); } } CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
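Two small things are visible in this pair: the double-precision branch labels its measurement "cublasSdot" (and "hipblasSdot" in the HIP twin) even though it times the Ddot call, and thrust::inner_product(v1.begin(), v1.end(), v2.begin(), 0) passes an int literal as the initial value, so, as with std::inner_product, the accumulation type follows that init value and the reduction runs in int rather than elementType. The constant 1*2 test data happens to survive this, but fractional values would be truncated. A one-line sketch of the fix, assuming the surrounding elementType alias:

// Sketch: type the init value so the accumulator stays in elementType.
elementType sum = thrust::inner_product(v1.begin(), v1.end(), v2.begin(), elementType(0));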
fa5915438546056761d93a58c1422079d7cb4ae9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <random/rng.cuh> #include <set> #include <vector> #include "test_utils.h" using namespace MLCommon; namespace raft { namespace random { // Terminology: // SWoR - Sample Without Replacement template <typename T> struct SWoRInputs { int len, sampledLen; int largeWeightIndex; T largeWeight; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) { return os; } template <typename T> class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam(); CUDA_CHECK(hipStreamCreate(&stream)); Rng r(params.seed, params.gtype); allocate(in, params.len); allocate(wts, params.len); allocate(out, params.sampledLen); allocate(outIdx, params.sampledLen); h_outIdx.resize(params.sampledLen); r.uniform(in, params.len, T(-1.0), T(1.0), stream); r.uniform(wts, params.len, T(1.0), T(2.0), stream); if (params.largeWeightIndex >= 0) { updateDevice(wts + params.largeWeightIndex, &params.largeWeight, 1, stream); } r.sampleWithoutReplacement(handle, out, outIdx, in, wts, params.sampledLen, params.len, stream); updateHost(&(h_outIdx[0]), outIdx, params.sampledLen, stream); } void TearDown() override { CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(wts)); CUDA_CHECK(hipFree(out)); CUDA_CHECK(hipFree(outIdx)); } protected: SWoRInputs<T> params; T *in, *out, *wts; int* outIdx; std::vector<int> h_outIdx; hipStream_t stream; raft::handle_t handle; }; typedef SWoRTest<float> SWoRTestF; const std::vector<SWoRInputs<float>> inputsf = { {1024, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512, 10, 100000.f, GenPhilox, 1234ULL}, {1024, 512, -1, 0.f, GenTaps, 1234ULL}, {1024, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenTaps, 
1234ULL}, {1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024, 512, 10, 100000.f, GenTaps, 1234ULL}, {1024, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512, 10, 100000.f, GenKiss99, 1234ULL}, }; TEST_P(SWoRTestF, Result) { std::set<int> occurence; for (int i = 0; i < params.sampledLen; ++i) { auto val = h_outIdx[i]; // indices must be in the given range ASSERT_TRUE(0 <= val && val < params.len) << "out-of-range index @i=" << i << " val=" << val << " sampledLen=" << params.sampledLen; // indices should not repeat ASSERT_TRUE(occurence.find(val) == occurence.end()) << "repeated index @i=" << i << " idx=" << val; occurence.insert(val); } // if there's a skewed distribution, the top index should correspond to the // particular item with a large weight if (params.largeWeightIndex >= 0) { ASSERT_EQ(h_outIdx[0], params.largeWeightIndex); } } INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf)); typedef SWoRTest<double> SWoRTestD; const std::vector<SWoRInputs<double>> inputsd = { {1024, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512, 10, 100000.0, GenPhilox, 1234ULL}, {1024, 512, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenTaps, 
1234ULL}, {1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024, 512, 10, 100000.0, GenTaps, 1234ULL}, {1024, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512, 10, 100000.0, GenKiss99, 1234ULL}, }; TEST_P(SWoRTestD, Result) { std::set<int> occurence; for (int i = 0; i < params.sampledLen; ++i) { auto val = h_outIdx[i]; // indices must be in the given range ASSERT_TRUE(0 <= val && val < params.len) << "out-of-range index @i=" << i << " val=" << val << " sampledLen=" << params.sampledLen; // indices should not repeat ASSERT_TRUE(occurence.find(val) == occurence.end()) << "repeated index @i=" << i << " idx=" << val; occurence.insert(val); } // if there's a skewed distribution, the top index should correspond to the // particular item with a large weight if (params.largeWeightIndex >= 0) { ASSERT_EQ(h_outIdx[0], params.largeWeightIndex); } } INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd)); } // namespace random } // namespace raft
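r.sampleWithoutReplacement draws sampledLen distinct indices from len items with probability proportional to the weights wts, which is exactly what the assertions check: indices in range, no repeats, and the heavily weighted item surfacing first when largeWeightIndex is set. One standard way to implement weighted sampling without replacement is the Efraimidis-Spirakis key trick: give item i the key u_i^(1/w_i) for uniform u_i in (0,1) and keep the k largest keys. The host-side sketch below illustrates that technique; it is not a claim about what raft's Rng does internally.

// Host-side sketch of weighted sampling without replacement via random keys
// (Efraimidis-Spirakis): key_i = u_i^(1/w_i); the k largest keys win.
#include <algorithm>
#include <cmath>
#include <numeric>
#include <random>
#include <vector>

std::vector<int> sample_without_replacement(const std::vector<float>& w, int k,
                                            unsigned long long seed)
{
    std::mt19937_64 gen(seed);
    std::uniform_real_distribution<float> uni(0.0f, 1.0f);
    std::vector<float> key(w.size());
    for (size_t i = 0; i < w.size(); ++i)
        key[i] = std::pow(uni(gen), 1.0f / w[i]);   // larger weight -> larger key on average

    std::vector<int> idx(w.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&](int a, int b) { return key[a] > key[b]; });
    idx.resize(k);                                  // k distinct indices, weight-biased order
    return idx;
}

With the test's largeWeight of 1e5 against weights drawn from [1, 2], that item's key is almost surely the largest, which matches the ASSERT_EQ(h_outIdx[0], params.largeWeightIndex) check.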
fa5915438546056761d93a58c1422079d7cb4ae9.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <random/rng.cuh> #include <set> #include <vector> #include "test_utils.h" using namespace MLCommon; namespace raft { namespace random { // Terminology: // SWoR - Sample Without Replacement template <typename T> struct SWoRInputs { int len, sampledLen; int largeWeightIndex; T largeWeight; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) { return os; } template <typename T> class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); Rng r(params.seed, params.gtype); allocate(in, params.len); allocate(wts, params.len); allocate(out, params.sampledLen); allocate(outIdx, params.sampledLen); h_outIdx.resize(params.sampledLen); r.uniform(in, params.len, T(-1.0), T(1.0), stream); r.uniform(wts, params.len, T(1.0), T(2.0), stream); if (params.largeWeightIndex >= 0) { updateDevice(wts + params.largeWeightIndex, &params.largeWeight, 1, stream); } r.sampleWithoutReplacement(handle, out, outIdx, in, wts, params.sampledLen, params.len, stream); updateHost(&(h_outIdx[0]), outIdx, params.sampledLen, stream); } void TearDown() override { CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(wts)); CUDA_CHECK(cudaFree(out)); CUDA_CHECK(cudaFree(outIdx)); } protected: SWoRInputs<T> params; T *in, *out, *wts; int* outIdx; std::vector<int> h_outIdx; cudaStream_t stream; raft::handle_t handle; }; typedef SWoRTest<float> SWoRTestF; const std::vector<SWoRInputs<float>> inputsf = { {1024, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL}, {1024, 512, 10, 100000.f, GenPhilox, 1234ULL}, {1024, 512, -1, 0.f, GenTaps, 1234ULL}, {1024, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL}, 
{1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL}, {1024, 512, 10, 100000.f, GenTaps, 1234ULL}, {1024, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL}, {1024, 512, 10, 100000.f, GenKiss99, 1234ULL}, }; TEST_P(SWoRTestF, Result) { std::set<int> occurence; for (int i = 0; i < params.sampledLen; ++i) { auto val = h_outIdx[i]; // indices must be in the given range ASSERT_TRUE(0 <= val && val < params.len) << "out-of-range index @i=" << i << " val=" << val << " sampledLen=" << params.sampledLen; // indices should not repeat ASSERT_TRUE(occurence.find(val) == occurence.end()) << "repeated index @i=" << i << " idx=" << val; occurence.insert(val); } // if there's a skewed distribution, the top index should correspond to the // particular item with a large weight if (params.largeWeightIndex >= 0) { ASSERT_EQ(h_outIdx[0], params.largeWeightIndex); } } INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf)); typedef SWoRTest<double> SWoRTestD; const std::vector<SWoRInputs<double>> inputsd = { {1024, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL}, {1024, 512, 10, 100000.0, GenPhilox, 1234ULL}, {1024, 512, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL}, {1024 
+ 1, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL}, {1024, 512, 10, 100000.0, GenTaps, 1234ULL}, {1024, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL}, {1024, 512, 10, 100000.0, GenKiss99, 1234ULL}, }; TEST_P(SWoRTestD, Result) { std::set<int> occurence; for (int i = 0; i < params.sampledLen; ++i) { auto val = h_outIdx[i]; // indices must be in the given range ASSERT_TRUE(0 <= val && val < params.len) << "out-of-range index @i=" << i << " val=" << val << " sampledLen=" << params.sampledLen; // indices should not repeat ASSERT_TRUE(occurence.find(val) == occurence.end()) << "repeated index @i=" << i << " idx=" << val; occurence.insert(val); } // if there's a skewed distribution, the top index should correspond to the // particular item with a large weight if (params.largeWeightIndex >= 0) { ASSERT_EQ(h_outIdx[0], params.largeWeightIndex); } } INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd)); } // namespace random } // namespace raft
c1d159abc366e94b6086b2e5648d62b408603884.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (C) 2010 by Vitsios Dimitrios
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*************************************************
 *                                               *
 *  Description: a "game of life" implementation *
 *                                               *
 *               ~ using CUDA ~                  *
 *                                               *
 *************************************************/

#include <stdio.h>
#include <stdlib.h>   /* malloc, atoi, exit, EXIT_SUCCESS */
#include <sys/types.h>
#include <hip/hip_runtime.h>
#include <time.h>

#define BLOCK_SIZE 256

char *host_board;
int n, t;

__global__ void make_move(char *dev_board, int n)
{
  /* 16x16 tile plus a one-cell halo (18x18 = 324 cells) in shared memory */
  __shared__ char block_brd[324];
  __shared__ int  sum[256];
  __shared__ int  flags[256];

  int local_Idx = threadIdx.x + threadIdx.y * 16;
  flags[local_Idx] = 0;

  int ix = blockIdx.x * blockDim.x + threadIdx.x;
  int iy = blockIdx.y * blockDim.y + threadIdx.y;
  int index = ix + iy * n;

  int th_idx = threadIdx.x + 1 + (threadIdx.y + 1) * 18;
  block_brd[th_idx] = dev_board[index]; //fill in (a part of) the matrix 'block board' with the '256' elements to process

  //fill in with the remaining '68' elements
  int ix_ul = blockIdx.x * blockDim.x;  //coordinates for the upper left corner of the quadratic table...
  int iy_ul = blockIdx.y * blockDim.y;  //...(size 16x16) containing the 256 elements designated for processing
  int upperLeftCorner = (ix_ul == 0 && iy_ul == 0) ? (n*n-1) : ix_ul - 1 + (iy_ul - 1)*n;
  block_brd[0] = dev_board[upperLeftCorner];

  int ix_ur = ix_ul + 15;               //coordinates for the upper right corner...
  int iy_ur = iy_ul;                    //...of the quadratic table
  int upperRightCorner = (ix_ur == n-1 && iy_ur == 0) ? n*(n-1) : ix_ur + 1 + (iy_ur - 1)*n;
  block_brd[17] = dev_board[upperRightCorner];

  int ix_bl = ix_ul;                    //coordinates for the bottom left corner...
  int iy_bl = iy_ul + 15;               //...of the quadratic table
  int bottomLeftCorner = (ix_bl == 0 && iy_bl == n-1) ? n-1 : ix_bl - 1 + (iy_bl + 1)*n;
  block_brd[306] = dev_board[bottomLeftCorner];

  int ix_br = ix_ul + 15;               //coordinates for the bottom right corner...
  int iy_br = iy_ul + 15;               //...of the quadratic table
  int bottomRightCorner = (ix_br == n-1 && iy_br == n-1) ? 0 : ix_br + 1 + (iy_br + 1)*n;
  block_brd[323] = dev_board[bottomRightCorner];

  //Upper Row
  for (int k = 0; k < 16; k++) {
    int urIdx1 = (iy_ul == 0) ? n*(n-1)+ix_ul+k : (ix_ul+k) + (iy_ul-1) * n;
    block_brd[k+1] = dev_board[urIdx1];
  }
  //Right Column
  for (int k = 0, i = 35; k < 16; k++, i += 18) {
    int urIdx2 = (ix_ur == n-1) ? n*(iy_ul+k) : (ix_ur+1) + (iy_ul+k) * n;
    block_brd[i] = dev_board[urIdx2];
  }
  //Bottom Row
  for (int k = 0, i = 307; k < 16; k++, i++) {
    int urIdx3 = (iy_bl == n-1) ? ix_ul+k : (ix_bl+k) + (iy_bl+1) * n;
    block_brd[i] = dev_board[urIdx3];
  }
  //Left Column
  for (int k = 0, i = 18; k < 16; k++, i += 18) {
    int urIdx4 = (ix_ul == 0) ? n*(iy_ul+1+k)-1 : (ix_ul-1) + (iy_ul+k) * n;
    block_brd[i] = dev_board[urIdx4];
  }

  //wait until the whole 18x18 tile is in shared memory before reading neighbours
  __syncthreads();

  if (index < n*n) {
    sum[local_Idx] = (block_brd[threadIdx.x     +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x + 1 +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x + 2 +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x     + (threadIdx.y + 1) * 18])
                   + (block_brd[threadIdx.x + 2 + (threadIdx.y + 1) * 18])
                   + (block_brd[threadIdx.x     + (threadIdx.y + 2) * 18])
                   + (block_brd[threadIdx.x + 1 + (threadIdx.y + 2) * 18])
                   + (block_brd[threadIdx.x + 2 + (threadIdx.y + 2) * 18]);

    if (block_brd[th_idx] == 0 && sum[local_Idx] == 3) flags[local_Idx] = 1;
    if (block_brd[th_idx] == 1 && (sum[local_Idx] < 2 || sum[local_Idx] > 3)) flags[local_Idx] = 2;

    __syncthreads();

    if (flags[local_Idx] == 1) dev_board[index] = 1;
    if (flags[local_Idx] == 2) dev_board[index] = 0;
  }
}

int main(int argc, char* argv[])
{
  FILE *Data_File;
  int i, j;
  char inFile[256], *inFileName = inFile, test_ch, outFileName[256], *dev_board;
  int ncount = 0;
  time_t start, end;

  if (argc != 3 && argc != 1) {
    printf("Insufficient parameters!\n");
    exit(1);
  }
  else {
    if (argc == 1) {
      printf("Type the number of iterations: ");
      scanf("%d", &t);
      printf("\nType the name of the data file: ");
      scanf("%s", inFile);
      printf("\n\n");
    }
    else {
      t = atoi(argv[1]);
      inFileName = argv[2];
    }
  }

  Data_File = fopen(inFileName, "r");
  do {
    fscanf(Data_File, "%c", &test_ch);
    ncount++;
  } while (test_ch != '\n');
  n = ncount/2;  // in d.txt: ncounter = 600 --> 300, the numbers (0,1) and 300, the spaces. So: n=ncounter/2
  fseek(Data_File, 0, SEEK_SET);

  int size = n * n * sizeof(char);
  host_board = (char *)malloc(size);

  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      fscanf(Data_File, "%c ", &host_board[i+j*n]);
      host_board[i+j*n] -= 48;
    }
  }
  printf("Reading done\n\n");
  fclose(Data_File);

  //Start timer...
  time(&start);

  hipMalloc((void**)&dev_board, size);
  hipMemcpy(dev_board, host_board, size, hipMemcpyHostToDevice);
  printf("Transfer done\n\n");

  dim3 dimBlock(16, 16);
  dim3 dimGrid((n/dimBlock.x), (n/dimBlock.y));

  for (int r = 0; r < t; r++) {
    hipLaunchKernelGGL(make_move, dimGrid, dimBlock, 0, 0, dev_board, n);
  }

  hipMemcpy(host_board, dev_board, size, hipMemcpyDeviceToHost);
  printf("GPU PROCESSING COMPLETE!\n\n");

  //Stop timer;
  time(&end);

  //Writing to the output data file
  i = 0;
  do {
    outFileName[i] = inFileName[i];
    i++;
  } while (inFileName[i] != 0);
  outFileName[i]   = '.';
  outFileName[i+1] = 'o';
  outFileName[i+2] = 'u';
  outFileName[i+3] = 't';
  outFileName[i+4] = 0;
  printf("Output File \''%s\'' was created!\n", outFileName);

  Data_File = fopen(outFileName, "w");
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      fprintf(Data_File, "%i", host_board[i+j*n]);
      if (j != n-1) fprintf(Data_File, " ");
    }
    if (i != n-1) fprintf(Data_File, "\n");
  }
  fclose(Data_File);

  hipFree(dev_board);
  free(host_board);

  double dif = difftime(end, start);
  printf("\n*******************************************************************************");
  printf("\nTotal time elapsed for transfering the data and computing in GPU: %.2lf seconds", dif);
  scanf("%d", &i);

  return EXIT_SUCCESS;
}
c1d159abc366e94b6086b2e5648d62b408603884.cu
/*
 * Copyright (C) 2010 by Vitsios Dimitrios
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/*************************************************
 *                                               *
 *  Description: a "game of life" implementation *
 *                                               *
 *               ~ using CUDA ~                  *
 *                                               *
 *************************************************/

#include <stdio.h>
#include <stdlib.h>   /* malloc, atoi, exit, EXIT_SUCCESS */
#include <sys/types.h>
#include <cuda.h>
#include <time.h>

#define BLOCK_SIZE 256

char *host_board;
int n, t;

__global__ void make_move(char *dev_board, int n)
{
  /* 16x16 tile plus a one-cell halo (18x18 = 324 cells) in shared memory */
  __shared__ char block_brd[324];
  __shared__ int  sum[256];
  __shared__ int  flags[256];

  int local_Idx = threadIdx.x + threadIdx.y * 16;
  flags[local_Idx] = 0;

  int ix = blockIdx.x * blockDim.x + threadIdx.x;
  int iy = blockIdx.y * blockDim.y + threadIdx.y;
  int index = ix + iy * n;

  int th_idx = threadIdx.x + 1 + (threadIdx.y + 1) * 18;
  block_brd[th_idx] = dev_board[index]; //fill in (a part of) the matrix 'block board' with the '256' elements to process

  //fill in with the remaining '68' elements
  int ix_ul = blockIdx.x * blockDim.x;  //coordinates for the upper left corner of the quadratic table...
  int iy_ul = blockIdx.y * blockDim.y;  //...(size 16x16) containing the 256 elements designated for processing
  int upperLeftCorner = (ix_ul == 0 && iy_ul == 0) ? (n*n-1) : ix_ul - 1 + (iy_ul - 1)*n;
  block_brd[0] = dev_board[upperLeftCorner];

  int ix_ur = ix_ul + 15;               //coordinates for the upper right corner...
  int iy_ur = iy_ul;                    //...of the quadratic table
  int upperRightCorner = (ix_ur == n-1 && iy_ur == 0) ? n*(n-1) : ix_ur + 1 + (iy_ur - 1)*n;
  block_brd[17] = dev_board[upperRightCorner];

  int ix_bl = ix_ul;                    //coordinates for the bottom left corner...
  int iy_bl = iy_ul + 15;               //...of the quadratic table
  int bottomLeftCorner = (ix_bl == 0 && iy_bl == n-1) ? n-1 : ix_bl - 1 + (iy_bl + 1)*n;
  block_brd[306] = dev_board[bottomLeftCorner];

  int ix_br = ix_ul + 15;               //coordinates for the bottom right corner...
  int iy_br = iy_ul + 15;               //...of the quadratic table
  int bottomRightCorner = (ix_br == n-1 && iy_br == n-1) ? 0 : ix_br + 1 + (iy_br + 1)*n;
  block_brd[323] = dev_board[bottomRightCorner];

  //Upper Row
  for (int k = 0; k < 16; k++) {
    int urIdx1 = (iy_ul == 0) ? n*(n-1)+ix_ul+k : (ix_ul+k) + (iy_ul-1) * n;
    block_brd[k+1] = dev_board[urIdx1];
  }
  //Right Column
  for (int k = 0, i = 35; k < 16; k++, i += 18) {
    int urIdx2 = (ix_ur == n-1) ? n*(iy_ul+k) : (ix_ur+1) + (iy_ul+k) * n;
    block_brd[i] = dev_board[urIdx2];
  }
  //Bottom Row
  for (int k = 0, i = 307; k < 16; k++, i++) {
    int urIdx3 = (iy_bl == n-1) ? ix_ul+k : (ix_bl+k) + (iy_bl+1) * n;
    block_brd[i] = dev_board[urIdx3];
  }
  //Left Column
  for (int k = 0, i = 18; k < 16; k++, i += 18) {
    int urIdx4 = (ix_ul == 0) ? n*(iy_ul+1+k)-1 : (ix_ul-1) + (iy_ul+k) * n;
    block_brd[i] = dev_board[urIdx4];
  }

  //wait until the whole 18x18 tile is in shared memory before reading neighbours
  __syncthreads();

  if (index < n*n) {
    sum[local_Idx] = (block_brd[threadIdx.x     +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x + 1 +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x + 2 +  threadIdx.y      * 18])
                   + (block_brd[threadIdx.x     + (threadIdx.y + 1) * 18])
                   + (block_brd[threadIdx.x + 2 + (threadIdx.y + 1) * 18])
                   + (block_brd[threadIdx.x     + (threadIdx.y + 2) * 18])
                   + (block_brd[threadIdx.x + 1 + (threadIdx.y + 2) * 18])
                   + (block_brd[threadIdx.x + 2 + (threadIdx.y + 2) * 18]);

    if (block_brd[th_idx] == 0 && sum[local_Idx] == 3) flags[local_Idx] = 1;
    if (block_brd[th_idx] == 1 && (sum[local_Idx] < 2 || sum[local_Idx] > 3)) flags[local_Idx] = 2;

    __syncthreads();

    if (flags[local_Idx] == 1) dev_board[index] = 1;
    if (flags[local_Idx] == 2) dev_board[index] = 0;
  }
}

int main(int argc, char* argv[])
{
  FILE *Data_File;
  int i, j;
  char inFile[256], *inFileName = inFile, test_ch, outFileName[256], *dev_board;
  int ncount = 0;
  time_t start, end;

  if (argc != 3 && argc != 1) {
    printf("Insufficient parameters!\n");
    exit(1);
  }
  else {
    if (argc == 1) {
      printf("Type the number of iterations: ");
      scanf("%d", &t);
      printf("\nType the name of the data file: ");
      scanf("%s", inFile);
      printf("\n\n");
    }
    else {
      t = atoi(argv[1]);
      inFileName = argv[2];
    }
  }

  Data_File = fopen(inFileName, "r");
  do {
    fscanf(Data_File, "%c", &test_ch);
    ncount++;
  } while (test_ch != '\n');
  n = ncount/2;  // in d.txt: ncounter = 600 --> 300, the numbers (0,1) and 300, the spaces. So: n=ncounter/2
  fseek(Data_File, 0, SEEK_SET);

  int size = n * n * sizeof(char);
  host_board = (char *)malloc(size);

  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      fscanf(Data_File, "%c ", &host_board[i+j*n]);
      host_board[i+j*n] -= 48;
    }
  }
  printf("Reading done\n\n");
  fclose(Data_File);

  //Start timer...
  time(&start);

  cudaMalloc((void**)&dev_board, size);
  cudaMemcpy(dev_board, host_board, size, cudaMemcpyHostToDevice);
  printf("Transfer done\n\n");

  dim3 dimBlock(16, 16);
  dim3 dimGrid((n/dimBlock.x), (n/dimBlock.y));

  for (int r = 0; r < t; r++) {
    make_move<<<dimGrid, dimBlock>>>(dev_board, n);
  }

  cudaMemcpy(host_board, dev_board, size, cudaMemcpyDeviceToHost);
  printf("GPU PROCESSING COMPLETE!\n\n");

  //Stop timer;
  time(&end);

  //Writing to the output data file
  i = 0;
  do {
    outFileName[i] = inFileName[i];
    i++;
  } while (inFileName[i] != 0);
  outFileName[i]   = '.';
  outFileName[i+1] = 'o';
  outFileName[i+2] = 'u';
  outFileName[i+3] = 't';
  outFileName[i+4] = 0;
  printf("Output File \''%s\'' was created!\n", outFileName);

  Data_File = fopen(outFileName, "w");
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      fprintf(Data_File, "%i", host_board[i+j*n]);
      if (j != n-1) fprintf(Data_File, " ");
    }
    if (i != n-1) fprintf(Data_File, "\n");
  }
  fclose(Data_File);

  cudaFree(dev_board);
  free(host_board);

  double dif = difftime(end, start);
  printf("\n*******************************************************************************");
  printf("\nTotal time elapsed for transfering the data and computing in GPU: %.2lf seconds", dif);
  scanf("%d", &i);

  return EXIT_SUCCESS;
}
e3245f1eb3b40033f3ed20764f93cd3065b6d03b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. 
extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_normcdfinv (int n, double *result, double *y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = normcdfinv(y[id]); } }
e3245f1eb3b40033f3ed20764f93cd3065b6d03b.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. 
extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_normcdfinv (int n, double *result, double *y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = normcdfinv(y[id]); } }
ad37b0ad3df41c0a3d07f965a794d90879ab99c6.hip
// !!! This is a file automatically generated by hipify!!! /* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsccublas.h> /* cublas definitions are here */ #include <petsc/private/cudavecimpl.h> #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnCpotrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnCpotrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnCpotrs((a),(b),(c),(d),(hipComplex*)(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnCsytrf((a),(b),(c),(hipComplex*)(d),(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnCsytrf_bufferSize((a),(b),(hipComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnCgetrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnCgetrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnCgetrs((a),(b),(c),(d),(hipComplex*)(e),(f),(g),(hipComplex*)(h),(i),(j)) #else /* complex double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnZpotrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnZpotrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnZpotrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnZsytrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnZsytrf_bufferSize((a),(b),(hipDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnZgetrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnZgetrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnZgetrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(g),(hipDoubleComplex*)(h),(i),(j)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) 
hipsolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #else /* real double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ /* factorization support */ int *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_work; /* device workspace */ int fact_lwork; int *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = hipMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); if (!dA->d_v) { cerr = hipMalloc((void**)&dA->d_v,cA->lda*cA->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); } copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? 
"Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = hipMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArrayWrite(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (!dA->d_v) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; hipError_t cerr; cerr = hipMalloc((void**)&dA->d_v,cA->lda*cA->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); } *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArrayWrite(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArrayRead(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArrayRead(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArray(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArray(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; hipError_t ccer; cusolverStatus_t cerr; hipsolverDnHandle_t handle; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ int il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = hipFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr 
= MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *dx; hipsolverDnHandle_t handle; PetscBool iscuda; int nrhs,n,lda,ldx; #if defined(PETSC_USE_DEBUG) int info; #endif hipError_t ccer; cusolverStatus_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (X != B) { ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); /* MatMatSolve does not have a dispatching mechanism, we may end up with a MATSEQDENSE here */ ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,HIPBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. 
*/ cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *y; hipsolverDnHandle_t handle; int one = 1,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif hipError_t ccer; cusolverStatus_t cerr; PetscBool iscuda; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); /* MatSolve does not have a dispatching mechanism, we may end up with a VECSTANDARD here */ ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (iscuda) { ierr = VecCopy(xx,yy);CHKERRQ(ierr); ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr); } else { if (!dA->workvec) { ierr = MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr); } ierr = VecCopy(xx,dA->workvec);CHKERRQ(ierr); ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. 
*/ cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (iscuda) { ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr); ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = 
PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = hipsolverDnXpotrf_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = hipsolverDnXpotrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ static PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA, PetscBool tB) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *b = (Mat_SeqDense*)B->data; Mat_SeqDense *c = (Mat_SeqDense*)C->data; const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; int m,n,k,alda,blda,clda; PetscErrorCode ierr; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; hipError_t cerr; PetscFunctionBegin; ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&alda);CHKERRQ(ierr); ierr = PetscMPIIntCast(b->lda,&blda);CHKERRQ(ierr); ierr = PetscMPIIntCast(c->lda,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? HIPBLAS_OP_T : HIPBLAS_OP_N,tB ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); cerr = WaitForGPU();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */ hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar *array[]) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar *array[]) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatDenseRestoreArray_SeqDenseCUDA(Mat A,PetscScalar *array[]) { PetscFunctionBegin; PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; int j,N,m,ldax,lday,one = 1; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForGPU();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; 
if (dA) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseSetPreallocation_SeqDenseCUDA(Mat B,PetscScalar *data) { Mat_SeqDense *b; Mat_SeqDenseCUDA *dB; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); b = (Mat_SeqDense*)B->data; b->Mmax = B->rmap->n; b->Nmax = B->cmap->n; if (b->lda <= 0 || b->changelda) b->lda = B->rmap->n; if (b->lda < B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Invalid lda %D < %D",b->lda,B->rmap->n); ierr = PetscIntMultError(b->lda,b->Nmax,NULL);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; cerr = hipMalloc((void**)&dB->d_v,b->lda*b->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); if (!data) { /* petsc-allocated storage */ if (!b->user_alloc) { ierr = PetscFree(b->v);CHKERRQ(ierr); } ierr = PetscCalloc1((size_t)b->lda*b->Nmax,&b->v);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)B,b->lda*b->Nmax*sizeof(PetscScalar));CHKERRQ(ierr); b->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ if (!b->user_alloc) { ierr = PetscFree(b->v);CHKERRQ(ierr); } b->v = data; b->user_alloc = PETSC_TRUE; } B->offloadmask = PETSC_OFFLOAD_CPU; B->preallocated = PETSC_TRUE; B->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; const PetscScalar *da; PetscScalar *db; hipError_t cerr; ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr); if (a->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* it can be done better */ cerr = hipMemcpy(db+j*m,da+j*a->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = hipMemcpy(db,da,a->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr); (*B)->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = 
MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { PetscErrorCode ierr; PetscFunctionBegin; A->boundtocpu = flg; if (!flg) { ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqDenseSetPreallocation_C",MatSeqDenseSetPreallocation_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C", MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C", MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C", MatDenseRestoreArray_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqDenseSetPreallocation_C",MatSeqDenseSetPreallocation_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C", MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C", MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C", MatDenseRestoreArray_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); 
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*MC MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs. Options Database Keys: . -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions() Level: beginner .seealso: MatCreateSeqDenseCUDA() M*/ PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate_SeqDense(B);CHKERRQ(ierr); ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
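Reference note (not part of the PETSc source): once the PETSc bookkeeping is stripped away, the Cholesky factor/solve path above reduces to a cusolverDnXpotrf_bufferSize / cusolverDnXpotrf / cusolverDnXpotrs sequence. The standalone sketch below shows that bare call pattern in plain CUDA/cuSOLVER (shown in CUDA rather than HIP for brevity); the 3x3 SPD system, right-hand side, and the absence of status checking are illustrative assumptions only.

/* Hedged sketch: the bare cuSOLVER Cholesky factor + solve sequence that
   MatCholeskyFactor_SeqDenseCUDA / MatSolve_SeqDenseCUDA_Private wrap above.
   The matrix, right-hand side, and sizes are invented for illustration. */
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <stdio.h>

int main(void)
{
  const int n = 3, nrhs = 1, lda = 3, ldb = 3;
  double hA[9] = {4,1,1, 1,3,0, 1,0,2};   /* SPD, column-major */
  double hB[3] = {6,4,3};                 /* right-hand side   */
  double *dA, *dB, *dwork;
  int    *dinfo, lwork = 0, info = 0;
  cusolverDnHandle_t handle;

  cusolverDnCreate(&handle);
  cudaMalloc((void**)&dA, sizeof(hA));
  cudaMalloc((void**)&dB, sizeof(hB));
  cudaMalloc((void**)&dinfo, sizeof(int));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

  /* workspace query, factor, solve: the same three steps as the Mat routines */
  cusolverDnDpotrf_bufferSize(handle, CUBLAS_FILL_MODE_LOWER, n, dA, lda, &lwork);
  cudaMalloc((void**)&dwork, (size_t)lwork*sizeof(double));
  cusolverDnDpotrf(handle, CUBLAS_FILL_MODE_LOWER, n, dA, lda, dwork, lwork, dinfo);
  cusolverDnDpotrs(handle, CUBLAS_FILL_MODE_LOWER, n, nrhs, dA, lda, dB, ldb, dinfo);

  cudaMemcpy(&info, dinfo, sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(hB, dB, sizeof(hB), cudaMemcpyDeviceToHost);
  printf("info = %d, x = [%g %g %g]\n", info, hB[0], hB[1], hB[2]);

  cudaFree(dA); cudaFree(dB); cudaFree(dwork); cudaFree(dinfo);
  cusolverDnDestroy(handle);
  return 0;
}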
ad37b0ad3df41c0a3d07f965a794d90879ab99c6.cu
/* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsccublas.h> /* cublas definitions are here */ #include <petsc/private/cudavecimpl.h> #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnCpotrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnCpotrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnCpotrs((a),(b),(c),(d),(cuComplex*)(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnCsytrf((a),(b),(c),(cuComplex*)(d),(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnCsytrf_bufferSize((a),(b),(cuComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnCgetrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnCgetrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnCgetrs((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j)) #else /* complex double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnZpotrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnZpotrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnZpotrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnZsytrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnZsytrf_bufferSize((a),(b),(cuDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnZgetrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnZgetrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnZgetrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) 
cusolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #else /* real double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ /* factorization support */ int *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_work; /* device workspace */ int fact_lwork; int *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = cudaMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); if (!dA->d_v) { cerr = cudaMalloc((void**)&dA->d_v,cA->lda*cA->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); } copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? 
"Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */ cerr = cudaMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArrayWrite(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (!dA->d_v) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; cudaError_t cerr; cerr = cudaMalloc((void**)&dA->d_v,cA->lda*cA->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); } *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArrayWrite(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArrayRead(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArrayRead(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDAGetArray(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } PetscErrorCode MatDenseCUDARestoreArray(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; cudaError_t ccer; cusolverStatus_t cerr; cusolverDnHandle_t handle; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ int il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = cudaFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = 
PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *dx; cusolverDnHandle_t handle; PetscBool iscuda; int nrhs,n,lda,ldx; #if defined(PETSC_USE_DEBUG) int info; #endif cudaError_t ccer; cusolverStatus_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (X != B) { ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); /* MatMatSolve does not have a dispatching mechanism, we may end up with a MATSEQDENSE here */ ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,CUBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit cudaErrorNotReady (error 34) due to "device not ready" on CUDA API call to cudaEventQuery. 
*/ cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr); if (!iscuda) { ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr); } #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscScalar *y; cusolverDnHandle_t handle; int one = 1,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cudaError_t ccer; cusolverStatus_t cerr; PetscBool iscuda; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); /* MatSolve does not have a dispatching mechanism, we may end up with a VECSTANDARD here */ ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (iscuda) { ierr = VecCopy(xx,yy);CHKERRQ(ierr); ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr); } else { if (!dA->workvec) { ierr = MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr); } ierr = VecCopy(xx,dA->workvec);CHKERRQ(ierr); ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr); } ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) { ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else if (A->factortype == MAT_FACTOR_CHOLESKY) { ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit cudaErrorNotReady (error 34) due to "device not ready" on CUDA API call to cudaEventQuery. 
*/ cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (iscuda) { ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr); ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = 
PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; int n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = cusolverDnXpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ccer = WaitForGPU();CHKERRCUDA(ccer); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA; A->ops->matsolve = MatMatSolve_SeqDenseCUDA; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ static PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA, PetscBool tB) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDense *b = (Mat_SeqDense*)B->data; Mat_SeqDense *c = (Mat_SeqDense*)C->data; const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; int m,n,k,alda,blda,clda; PetscErrorCode ierr; cublasHandle_t cublasv2handle; cublasStatus_t berr; cudaError_t cerr; PetscFunctionBegin; ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = PetscMPIIntCast(a->lda,&alda);CHKERRQ(ierr); ierr = PetscMPIIntCast(b->lda,&blda);CHKERRQ(ierr); ierr = PetscMPIIntCast(c->lda,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? CUBLAS_OP_T : CUBLAS_OP_N,tB ? 
CUBLAS_OP_T : CUBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); cerr = WaitForGPU();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */ cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar *array[]) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar *array[]) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatDenseRestoreArray_SeqDenseCUDA(Mat A,PetscScalar *array[]) { PetscFunctionBegin; PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; int j,N,m,ldax,lday,one = 1; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } cerr = WaitForGPU();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; 
if (dA) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseSetPreallocation_SeqDenseCUDA(Mat B,PetscScalar *data) { Mat_SeqDense *b; Mat_SeqDenseCUDA *dB; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); b = (Mat_SeqDense*)B->data; b->Mmax = B->rmap->n; b->Nmax = B->cmap->n; if (b->lda <= 0 || b->changelda) b->lda = B->rmap->n; if (b->lda < B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Invalid lda %D < %D",b->lda,B->rmap->n); ierr = PetscIntMultError(b->lda,b->Nmax,NULL);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; cerr = cudaMalloc((void**)&dB->d_v,b->lda*b->Nmax*sizeof(PetscScalar));CHKERRCUDA(cerr); if (!data) { /* petsc-allocated storage */ if (!b->user_alloc) { ierr = PetscFree(b->v);CHKERRQ(ierr); } ierr = PetscCalloc1((size_t)b->lda*b->Nmax,&b->v);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)B,b->lda*b->Nmax*sizeof(PetscScalar));CHKERRQ(ierr); b->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ if (!b->user_alloc) { ierr = PetscFree(b->v);CHKERRQ(ierr); } b->v = data; b->user_alloc = PETSC_TRUE; } B->offloadmask = PETSC_OFFLOAD_CPU; B->preallocated = PETSC_TRUE; B->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; const PetscScalar *da; PetscScalar *db; cudaError_t cerr; ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr); if (a->lda > A->rmap->n) { PetscInt j,m = A->rmap->n; for (j=0; j<A->cmap->n; j++) { /* it can be done better */ cerr = cudaMemcpy(db+j*m,da+j*a->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } } else { cerr = cudaMemcpy(db,da,a->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr); (*B)->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = 
MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { PetscErrorCode ierr; PetscFunctionBegin; A->boundtocpu = flg; if (!flg) { ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqDenseSetPreallocation_C",MatSeqDenseSetPreallocation_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C", MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C", MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C", MatDenseRestoreArray_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqDenseSetPreallocation_C",MatSeqDenseSetPreallocation_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C", MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C", MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C", MatDenseRestoreArray_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = 
MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*MC MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs. Options Database Keys: . -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions() Level: beginner .seealso: MatCreateSeqDenseCUDA() M*/ PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate_SeqDense(B);CHKERRQ(ierr); ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
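Usage note (illustrative, not an excerpt from the PETSc test suite): the manual page above only documents the type name and the -mat_type option. A hedged sketch that exercises the MatMult and LU factor/solve hooks registered in MatBindToCPU_SeqDenseCUDA could look like the following; the 2x2 values are invented, and the NULL orderings are assumed acceptable because the dense factorization ignores them.

/* Hedged sketch: assemble a 2x2 MATSEQDENSECUDA matrix, apply it, then
   LU-factor and solve on the GPU.  Values and sizes are illustrative. */
#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat            A;
  Vec            x, b;
  MatFactorInfo  info;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
  ierr = MatCreate(PETSC_COMM_SELF, &A);CHKERRQ(ierr);
  ierr = MatSetSizes(A, 2, 2, 2, 2);CHKERRQ(ierr);
  ierr = MatSetType(A, MATSEQDENSECUDA);CHKERRQ(ierr); /* or -mat_type seqdensecuda + MatSetFromOptions() */
  ierr = MatSetUp(A);CHKERRQ(ierr);
  ierr = MatSetValue(A, 0, 0, 4.0, INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatSetValue(A, 0, 1, 1.0, INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatSetValue(A, 1, 0, 1.0, INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatSetValue(A, 1, 1, 3.0, INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatCreateVecs(A, &x, &b);CHKERRQ(ierr);          /* VECSEQCUDA vectors       */
  ierr = VecSet(x, 1.0);CHKERRQ(ierr);
  ierr = MatMult(A, x, b);CHKERRQ(ierr);                  /* MatMult_SeqDenseCUDA     */

  ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
  ierr = MatLUFactor(A, NULL, NULL, &info);CHKERRQ(ierr); /* cusolverDnXgetrf path    */
  ierr = MatSolve(A, b, x);CHKERRQ(ierr);                 /* cusolverDnXgetrs path    */

  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}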
520e76cf331ca25996eaf3853db9be3bfb217e3b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cu_leaky_relu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *src = NULL; hipMalloc(&src, XSIZE*YSIZE); float *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cu_leaky_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cu_leaky_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cu_leaky_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
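Two details of the generated harness above are worth flagging: the launch grid is obtained by growing iXSIZE/iYSIZE in while-loops until they divide evenly, and hipMalloc is given XSIZE*YSIZE bytes even though the kernel is later passed n = XSIZE*YSIZE elements, so for float data only a quarter of the accessed range is actually allocated. A hedged sketch of the conventional equivalents, reusing the variable names from the harness and assuming sizeof(float) from the pointer types:

/* Hedged sketch of conventional setup for the same launch: allocate by
   element count * sizeof(float) and round the grid up with ceil-division. */
size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
hipMalloc((void**)&src, bytes);
hipMalloc((void**)&dst, bytes);
dim3 gridBlock((XSIZE + BLOCKX - 1)/BLOCKX, (YSIZE + BLOCKY - 1)/BLOCKY); /* no while-loop padding */
dim3 threadBlock(BLOCKX, BLOCKY);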
520e76cf331ca25996eaf3853db9be3bfb217e3b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cu_leaky_relu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); float *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cu_leaky_relu<<<gridBlock,threadBlock>>>(src,dst,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cu_leaky_relu<<<gridBlock,threadBlock>>>(src,dst,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cu_leaky_relu<<<gridBlock,threadBlock>>>(src,dst,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
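The kernel being timed lives in "cu_leaky_relu.cu", which is not part of this listing; only its (const float *src, float *dst, int n) argument list can be read off the launch site. A hypothetical sketch consistent with that signature and with the 2D grid/block shapes used above follows; the 0.1f slope and the index flattening are assumptions, not the shipped kernel.

/* Hypothetical kernel body -- NOT the shipped cu_leaky_relu.cu, just a sketch
   matching the launch signature above; slope and indexing are assumed. */
__global__ void cu_leaky_relu(const float *src, float *dst, int n)
{
  int block  = blockIdx.y * gridDim.x + blockIdx.x;            /* linearize 2D grid  */
  int thread = threadIdx.y * blockDim.x + threadIdx.x;         /* linearize 2D block */
  int i      = block * (blockDim.x * blockDim.y) + thread;
  if (i < n) dst[i] = src[i] > 0.0f ? src[i] : 0.1f * src[i];  /* leaky ReLU */
}

Note also that the harness reads steady_clock::now() right after the 1000 asynchronous launches without a cudaDeviceSynchronize() before `end`, so the reported microseconds largely reflect launch/queueing overhead; a synchronization (or cudaEvent-based timing) before sampling the clock would be needed to measure execution time.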
862d612c23d715a2938eea541c944a249412e59e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _BITONIC_SORT_ #define _BITONIC_SORT_ #include "stdlib.h" #include "stdio.h" #include "string.h" #include "bitonicSort_kernel.cu" #include "math.h" #include "assert.h" #include "mapImpl.cu" #include "GPU_Dll.h" /* @ d_Rin, the input pointer array. @ rLen, the number of tuples. @ d_Rout, the output pointer array. */ void bitonicSortGPU(Record* d_Rin, int rLen, Record *d_Rout, int numThreadPB=NUM_BLOCKS_CHUNK, int numBlock=NUM_BLOCKS_CHUNK) { #ifdef OUTPUT_INFO #ifdef SHARED_MEM printf("YES, SHARED_MEM in bitonic sort\n"); #else printf("NO, SHARED_MEM in bitonic sort\n"); #endif #endif unsigned int numRecordsR; unsigned int size = rLen; unsigned int level = 0; while( size != 1 ) { size = size/2; level++; } if( (1<<level) < rLen ) { level++; } numRecordsR = (1<<level); if( rLen <= 256*1024 ) { //unsigned int numRecordsR = rLen; unsigned int numThreadsSort = numThreadPB; if(numRecordsR<numThreadPB) numThreadsSort=numRecordsR; unsigned int numBlocksXSort = numRecordsR/numThreadsSort; unsigned int numBlocksYSort = 1; dim3 gridSort( numBlocksXSort, numBlocksYSort ); unsigned int memSizeRecordsR = sizeof( Record ) * numRecordsR; //copy the <offset, length> pairs. Record* d_R; GPUMALLOC( (void**) &d_R, memSizeRecordsR) ; Record tempValue; tempValue.x=tempValue.y=TEST_MAX; mapInit(d_R, rLen, numRecordsR, tempValue);//[rLen, numRecordsR) CUDA_SAFE_CALL( hipMemcpy( d_R, d_Rin, rLen*sizeof(Record), hipMemcpyDeviceToDevice) ); for( int k = 2; k <= numRecordsR; k *= 2 ) { for( int j = k/2; j > 0; j /= 2 ) { hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, d_R, numRecordsR, k, j); } } //CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(Record)*rLen, hipMemcpyDeviceToDevice) ); CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R, sizeof(Record)*rLen, hipMemcpyDeviceToDevice) ); hipFree( d_R ); hipDeviceSynchronize(); } else { unsigned int numThreadsSort = numThreadPB; unsigned int numBlocksYSort = 1; unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort; if(numBlocksXSort>=(1<<16)) { numBlocksXSort=(1<<15); numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort; } unsigned int numBlocksChunk = numBlock; unsigned int numThreadsChunk = numThreadPB; unsigned int chunkSize = numBlocksChunk*numThreadsChunk; unsigned int numChunksR = numRecordsR/chunkSize; dim3 gridSort( numBlocksXSort, numBlocksYSort ); unsigned int memSizeRecordsR = sizeof( Record ) * numRecordsR; Record* d_R; GPUMALLOC( (void**) &d_R, memSizeRecordsR) ; Record tempValue; tempValue.x=tempValue.y=TEST_MAX; mapInit(d_R, rLen, numRecordsR, tempValue); unsigned int timer=0; //startTimer(&timer); CUDA_SAFE_CALL( hipMemcpy( d_R, d_Rin, rLen*sizeof(Record), hipMemcpyDeviceToDevice) ); //endTimer("copy GPUtoGPU", &timer); int sharedMemSize=numThreadPB*sizeof(Record); //startTimer(&timer); for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ ) { hipLaunchKernelGGL(( unitBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), sharedMemSize, 0, d_R, numRecordsR, chunkIdx,numThreadsChunk, chunkSize, numBlocksChunk); } //endTimer("unit", &timer); int j; for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 ) { for( j = k/2; j > numThreadsChunk/2; j /= 2 ) { hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, d_R, numRecordsR, k, j); } for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ ) { hipLaunchKernelGGL(( partBitonicSortKernel), dim3(numBlocksChunk), 
dim3(numThreadsChunk), sharedMemSize, 0, d_R, numRecordsR, chunkIdx, k/numThreadsSort,numThreadsChunk, chunkSize, numBlocksChunk ); } } //startTimer(&timer); //CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(Record)*rLen, hipMemcpyDeviceToDevice) ); CUDA_SAFE_CALL( hipMemcpy( d_Rout, d_R, sizeof(Record)*rLen, hipMemcpyDeviceToDevice) ); //endTimer("copy GPUtoGPU result", &timer); hipFree( d_R ); hipDeviceSynchronize(); } } ////////////////////////////////////////////////////////////////////// //the export interface extern "C" void GPUOnly_bitonicSort( Record* d_Rin, int rLen, Record* d_Rout, int numThreadPB, int numBlock) { bitonicSortGPU( d_Rin, rLen, d_Rout, numThreadPB, numBlock ); } extern "C" void GPUCopy_bitonicSort( Record* h_Rin, int rLen, Record* h_Rout, int numThreadPB, int numBlock ) { int memSize = sizeof(Record)*rLen; Record* d_Rin; Record* d_Rout; GPUMALLOC( (void**)&d_Rin, memSize ); GPUMALLOC( (void**)&d_Rout, memSize ); TOGPU( d_Rin, h_Rin, memSize ); bitonicSortGPU( d_Rin, rLen, d_Rout, numThreadPB, numBlock); FROMGPU( h_Rout, d_Rout, memSize ); GPUFREE( d_Rin ); GPUFREE( d_Rout ); } #endif
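// Illustrative sketch, not part of the original library: a CPU reference of the
// bitonic network that the k/j loops above drive on the GPU. Assumptions not taken
// from this file: records are ordered ascending by the key field x, and the padding
// uses a large sentinel (the role TEST_MAX plays above) so the padded tail sorts to
// the end and can be dropped afterwards.
#include <climits>
#include <utility>
#include <vector>

struct RecordRef { int x; int y; };  // stand-in for the Record type from GPU_Dll.h

inline void bitonicSortCPU(std::vector<RecordRef>& r) {
  const size_t originalLen = r.size();
  // Pad to the next power of two, mirroring the level/numRecordsR computation above.
  size_t n = 1;
  while (n < originalLen) n <<= 1;
  r.resize(n, RecordRef{INT_MAX, INT_MAX});
  // Same nested loops as the device code: k is the current sorted-run size, j the stride.
  for (size_t k = 2; k <= n; k *= 2) {
    for (size_t j = k / 2; j > 0; j /= 2) {
      for (size_t i = 0; i < n; ++i) {
        const size_t partner = i ^ j;             // compare-exchange partner
        if (partner > i) {
          const bool ascending = ((i & k) == 0);  // direction of this bitonic run
          if ((r[i].x > r[partner].x) == ascending)
            std::swap(r[i], r[partner]);
        }
      }
    }
  }
  r.resize(originalLen);  // drop the sentinel padding, as the device-to-device copy of rLen records does
}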
862d612c23d715a2938eea541c944a249412e59e.cu
#ifndef _BITONIC_SORT_ #define _BITONIC_SORT_ #include "stdlib.h" #include "stdio.h" #include "string.h" #include "bitonicSort_kernel.cu" #include "math.h" #include "assert.h" #include "mapImpl.cu" #include "GPU_Dll.h" /* @ d_Rin, the input pointer array. @ rLen, the number of tuples. @ d_Rout, the output pointer array. */ void bitonicSortGPU(Record* d_Rin, int rLen, Record *d_Rout, int numThreadPB=NUM_BLOCKS_CHUNK, int numBlock=NUM_BLOCKS_CHUNK) { #ifdef OUTPUT_INFO #ifdef SHARED_MEM printf("YES, SHARED_MEM in bitonic sort\n"); #else printf("NO, SHARED_MEM in bitonic sort\n"); #endif #endif unsigned int numRecordsR; unsigned int size = rLen; unsigned int level = 0; while( size != 1 ) { size = size/2; level++; } if( (1<<level) < rLen ) { level++; } numRecordsR = (1<<level); if( rLen <= 256*1024 ) { //unsigned int numRecordsR = rLen; unsigned int numThreadsSort = numThreadPB; if(numRecordsR<numThreadPB) numThreadsSort=numRecordsR; unsigned int numBlocksXSort = numRecordsR/numThreadsSort; unsigned int numBlocksYSort = 1; dim3 gridSort( numBlocksXSort, numBlocksYSort ); unsigned int memSizeRecordsR = sizeof( Record ) * numRecordsR; //copy the <offset, length> pairs. Record* d_R; GPUMALLOC( (void**) &d_R, memSizeRecordsR) ; Record tempValue; tempValue.x=tempValue.y=TEST_MAX; mapInit(d_R, rLen, numRecordsR, tempValue);//[rLen, numRecordsR) CUDA_SAFE_CALL( cudaMemcpy( d_R, d_Rin, rLen*sizeof(Record), cudaMemcpyDeviceToDevice) ); for( int k = 2; k <= numRecordsR; k *= 2 ) { for( int j = k/2; j > 0; j /= 2 ) { bitonicKernel<<<gridSort, numThreadsSort>>>(d_R, numRecordsR, k, j); } } //CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(Record)*rLen, cudaMemcpyDeviceToDevice) ); CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R, sizeof(Record)*rLen, cudaMemcpyDeviceToDevice) ); cudaFree( d_R ); cudaThreadSynchronize(); } else { unsigned int numThreadsSort = numThreadPB; unsigned int numBlocksYSort = 1; unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort; if(numBlocksXSort>=(1<<16)) { numBlocksXSort=(1<<15); numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort; } unsigned int numBlocksChunk = numBlock; unsigned int numThreadsChunk = numThreadPB; unsigned int chunkSize = numBlocksChunk*numThreadsChunk; unsigned int numChunksR = numRecordsR/chunkSize; dim3 gridSort( numBlocksXSort, numBlocksYSort ); unsigned int memSizeRecordsR = sizeof( Record ) * numRecordsR; Record* d_R; GPUMALLOC( (void**) &d_R, memSizeRecordsR) ; Record tempValue; tempValue.x=tempValue.y=TEST_MAX; mapInit(d_R, rLen, numRecordsR, tempValue); unsigned int timer=0; //startTimer(&timer); CUDA_SAFE_CALL( cudaMemcpy( d_R, d_Rin, rLen*sizeof(Record), cudaMemcpyDeviceToDevice) ); //endTimer("copy GPUtoGPU", &timer); int sharedMemSize=numThreadPB*sizeof(Record); //startTimer(&timer); for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ ) { unitBitonicSortKernel<<< numBlocksChunk, numThreadsChunk, sharedMemSize>>>( d_R, numRecordsR, chunkIdx,numThreadsChunk, chunkSize, numBlocksChunk); } //endTimer("unit", &timer); int j; for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 ) { for( j = k/2; j > numThreadsChunk/2; j /= 2 ) { bitonicKernel<<<gridSort, numThreadsSort>>>( d_R, numRecordsR, k, j); } for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ ) { partBitonicSortKernel<<< numBlocksChunk, numThreadsChunk, sharedMemSize>>>(d_R, numRecordsR, chunkIdx, k/numThreadsSort,numThreadsChunk, chunkSize, numBlocksChunk ); } } //startTimer(&timer); //CUDA_SAFE_CALL( cudaMemcpy( d_Rout, 
d_R+(numRecordsR-rLen), sizeof(Record)*rLen, cudaMemcpyDeviceToDevice) ); CUDA_SAFE_CALL( cudaMemcpy( d_Rout, d_R, sizeof(Record)*rLen, cudaMemcpyDeviceToDevice) ); //endTimer("copy GPUtoGPU result", &timer); cudaFree( d_R ); cudaThreadSynchronize(); } } ////////////////////////////////////////////////////////////////////// //the export interface extern "C" void GPUOnly_bitonicSort( Record* d_Rin, int rLen, Record* d_Rout, int numThreadPB, int numBlock) { bitonicSortGPU( d_Rin, rLen, d_Rout, numThreadPB, numBlock ); } extern "C" void GPUCopy_bitonicSort( Record* h_Rin, int rLen, Record* h_Rout, int numThreadPB, int numBlock ) { int memSize = sizeof(Record)*rLen; Record* d_Rin; Record* d_Rout; GPUMALLOC( (void**)&d_Rin, memSize ); GPUMALLOC( (void**)&d_Rout, memSize ); TOGPU( d_Rin, h_Rin, memSize ); bitonicSortGPU( d_Rin, rLen, d_Rout, numThreadPB, numBlock); FROMGPU( h_Rout, d_Rout, memSize ); GPUFREE( d_Rin ); GPUFREE( d_Rout ); } #endif
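// Hypothetical host-side usage, not from the original source: a caller of the
// extern "C" entry point exported above. The Record layout is an assumption inferred
// from tempValue.x / tempValue.y (the real definition lives in GPU_Dll.h), the
// thread/block counts are illustrative rather than tuned, and this only works when
// linked against the library that defines GPUCopy_bitonicSort.
#include <cstdlib>
#include <vector>

struct Record { int x; int y; };  // assumed layout; see GPU_Dll.h for the real one

extern "C" void GPUCopy_bitonicSort(Record* h_Rin, int rLen, Record* h_Rout,
                                    int numThreadPB, int numBlock);

int main() {
  const int rLen = 1 << 20;
  std::vector<Record> in(rLen), out(rLen);
  for (int i = 0; i < rLen; ++i) { in[i].x = std::rand(); in[i].y = i; }
  // The library pads rLen up to the next power of two internally, so arbitrary
  // lengths are fine; 256 threads per block / 256 chunk blocks are example values.
  GPUCopy_bitonicSort(in.data(), rLen, out.data(), 256, 256);
  return 0;
}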
1a2b050d7f6a92ee5fa741f0c36f8e230a17e4aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <hipcub/hipcub.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case mask_state::UNALLOCATED: return 0; case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT; case mask_state::ALL_NULL: return size; case mask_state::ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>(number_of_bits, detail::size_in_bits<bitmask_type>()); } // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, hipStream_t stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != mask_state::UNINITIALIZED) { uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00; CUDA_TRY( hipMemsetAsync(static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream)); } return mask; } __global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination, size_type begin_bit, size_type end_bit, bool valid, size_type number_of_mask_words) { auto x = destination + word_index(begin_bit); const auto last_word = word_index(end_bit) - word_index(begin_bit); bitmask_type fill_value = (valid == true) ? 
0xffffffff : 0x00; for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { if (destination_word_index == 0 || destination_word_index == last_word) { bitmask_type mask = ~bitmask_type{0}; if (destination_word_index == 0) { mask = ~(set_least_significant_bits(intra_word_index(begin_bit))); } if (destination_word_index == last_word) { mask = mask & set_least_significant_bits(intra_word_index(end_bit)); } x[destination_word_index] = (valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask; } else { x[destination_word_index] = fill_value; } } } // Set pre-allocated null mask of given bit range [begin_bit, end_bit) // to valid, if valid==true, // or null, otherwise; void set_null_mask( bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid, hipStream_t stream) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit < end_bit, "Invalid bit range."); if (bitmask != nullptr) { auto number_of_mask_words = num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>(); cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( set_null_mask_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words); CHECK_CUDA(stream); } } namespace { /** * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. * @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range **/ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? 
set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /** * @brief Convenience function to get offset word from a bitmask * * @see copy_offset_bitmask * @see offset_bitmask_and */ __device__ bitmask_type get_mask_offset_word(bitmask_type const *__restrict__ source, size_type destination_word_index, size_type source_begin_bit, size_type source_end_bit) { size_type source_word_index = destination_word_index + word_index(source_begin_bit); bitmask_type curr_word = source[source_word_index]; bitmask_type next_word = 0; if (word_index(source_end_bit) > word_index(source_begin_bit + destination_word_index * detail::size_in_bits<bitmask_type>())) { next_word = source[source_word_index + 1]; } return __funnelshift_r(curr_word, next_word, source_begin_bit); } /** * For each range `[first_bit_indices[i], last_bit_indices[i])` * (where 0 <= i < `num_ranges`), count the number of bits set outside the range * in the boundary words (i.e. words that include either * `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and * subtract the count from the range's null count. * * Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`. * * @param[in] bitmask The bitmask whose non-zero bits outside the range in the * boundary words will be counted. * @param[in] num_ranges The number of ranges * @param[in] first_bit_indices The indices (inclusive) of the first bit in each * range * @param[in] last_bit_indices The indices (exclusive) of the last bit in each * range * @param[in,out] null_counts The number of non-zero bits in each range to be * updated */ template <typename OffsetIterator, typename OutputIterator> __global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask, size_type num_ranges, OffsetIterator first_bit_indices, OffsetIterator last_bit_indices, OutputIterator null_counts) { constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()}; cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x; cudf::size_type range_id = tid; while (range_id < num_ranges) { size_type const first_bit_index = *(first_bit_indices + range_id); size_type const last_bit_index = *(last_bit_indices + range_id); size_type delta = 0; size_type num_slack_bits = 0; // compute delta due to the preceding bits in the first word in the range num_slack_bits = intra_word_index(first_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(first_bit_index)]; bitmask_type slack_mask = set_least_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } // compute delta due to the following bits in the last word in the range num_slack_bits = (last_bit_index % word_size_in_bits) == 0 ? 0 : word_size_in_bits - intra_word_index(last_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(last_bit_index)]; bitmask_type slack_mask = set_most_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } size_type updated_null_count = *(null_counts + range_id) + delta; *(null_counts + range_id) = updated_null_count; range_id += blockDim.x * gridDim.x; } } /** * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. 
* * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. * * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done * @param number_of_mask_words The number of `cudf::bitmask_type` words to copy **/ // TODO: Also make binops test that uses offset in column_view __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { destination[destination_word_index] = get_mask_offset_word(source, destination_word_index, source_begin_bit, source_end_bit); } } /** * @brief Computes the bitwise AND of an array of bitmasks * * @param destination The bitmask to write result into * @param source Array of source mask pointers. All masks must be of same size * @param begin_bit Array of offsets into corresponding @p source masks. * Must be same size as source array * @param num_sources Number of masks in @p source array * @param source_size Number of bits in each mask in @p source * @param number_of_mask_words The number of words of type bitmask_type to copy */ __global__ void offset_bitmask_and(bitmask_type *__restrict__ destination, bitmask_type const *const *__restrict__ source, size_type const *__restrict__ begin_bit, size_type num_sources, size_type source_size, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { bitmask_type destination_word = ~bitmask_type{0}; // All bits 1 for (size_type i = 0; i < num_sources; i++) { destination_word &= get_mask_offset_word( source[i], destination_word_index, begin_bit[i], begin_bit[i] + source_size); } destination[destination_word_index] = destination_word; } } // convert [first_bit_index,last_bit_index) to // [first_word_index,last_word_index) struct to_word_index : public thrust::unary_function<size_type, size_type> { const bool _inclusive = false; size_type const *const _d_bit_indices = nullptr; /** * @brief Constructor of a functor that converts bit indices to bitmask word * indices. * * @param[in] inclusive Flag that indicates whether bit indices are inclusive * or exclusive. * @param[in] d_bit_indices Pointer to an array of bit indices */ __host__ to_word_index(bool inclusive, size_type const *d_bit_indices) : _inclusive(inclusive), _d_bit_indices(d_bit_indices) { } __device__ size_type operator()(const size_type &i) const { auto bit_index = _d_bit_indices[i]; return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 
0 : 1); } }; } // namespace namespace detail { // Inplace Bitwise AND of the masks void inplace_bitmask_and(bitmask_type *dest_mask, std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, hipStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(std::all_of(begin_bits.begin(), begin_bits.end(), [](auto b) { return b >= 0; }), "Invalid range."); CUDF_EXPECTS(mask_size > 0, "Invalid bit range."); CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }), "Mask pointer cannot be null"); auto num_bytes = bitmask_allocation_size_bytes(mask_size); auto number_of_mask_words = num_bitmask_words(mask_size); rmm::device_vector<bitmask_type const *> d_masks(masks); rmm::device_vector<size_type> d_begin_bits(begin_bits); cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( offset_bitmask_and), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, dest_mask, d_masks.data().get(), d_begin_bits.data().get(), d_masks.size(), mask_size, number_of_mask_words); CHECK_CUDA(stream); } // Bitwise AND of the masks rmm::device_buffer bitmask_and(std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, hipStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(mask_size); auto number_of_mask_words = num_bitmask_words(mask_size); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; inplace_bitmask_and( static_cast<bitmask_type *>(dest_mask.data()), masks, begin_bits, mask_size, stream, mr); return dest_mask; } cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, hipStream_t stream = 0) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = num_bitmask_words(num_bits_to_count); constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_words, block_size); rmm::device_scalar<size_type> non_zero_count(0, stream); hipLaunchKernelGGL(( count_set_bits_kernel<block_size>), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream, bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, hipStream_t stream = 0) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, hipStream_t stream) { CUDF_EXPECTS(indices.size() % 2 == 0, "Array of indices needs to have an even number of elements."); for (size_t i = 0; i < indices.size() / 2; i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); } if (indices.size() == 0) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { std::vector<size_type> ret(indices.size() / 2); for (size_t i = 0; i < indices.size() / 2; i++) { ret[i] = indices[2 * i + 1] - indices[2 * i]; } return ret; } size_type num_ranges = indices.size() / 2; thrust::host_vector<size_type> 
h_first_indices(num_ranges); thrust::host_vector<size_type> h_last_indices(num_ranges); thrust::stable_partition_copy(thrust::seq, std::begin(indices), std::end(indices), thrust::make_counting_iterator(0), h_first_indices.begin(), h_last_indices.begin(), [](auto i) { return (i % 2) == 0; }); rmm::device_vector<size_type> d_first_indices = h_first_indices; rmm::device_vector<size_type> d_last_indices = h_last_indices; rmm::device_vector<size_type> d_null_counts(num_ranges, 0); auto word_num_set_bits = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); }); auto first_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(true, d_first_indices.data().get())); auto last_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(false, d_last_indices.data().get())); // first allocate temporary memroy size_t temp_storage_bytes{0}; CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream)); rmm::device_buffer d_temp_storage(temp_storage_bytes, stream); // second perform segmented reduction CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(d_temp_storage.data(), temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream)); CHECK_CUDA(stream); // third, adjust counts in segment boundaries (if segments are not // word-aligned) constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_ranges, block_size); hipLaunchKernelGGL(( subtract_set_bits_range_boundaries_kerenel), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream, bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin()); CHECK_CUDA(stream); std::vector<size_type> ret(num_ranges); CUDA_TRY(hipMemcpyAsync(ret.data(), d_null_counts.data().get(), num_ranges * sizeof(size_type), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); // now ret is valid. 
return ret; } std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, hipStream_t stream) { if (indices.size() == 0) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { return std::vector<size_type>(indices.size() / 2, 0); } auto ret = segmented_count_set_bits(bitmask, indices, stream); for (size_t i = 0; i < ret.size(); i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; ret[i] = (end - begin) - ret[i]; } return ret; } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_unset_bits(bitmask, start, stop); } // Count non-zero bits in the specified ranges std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_set_bits(bitmask, indices, 0); } // Count zero bits in the specified ranges std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_unset_bits(bitmask, indices, 0); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, hipStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( copy_offset_bitmask), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CHECK_CUDA(stream); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, hipStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer null_mask{0, stream, mr}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Returns the bitwise AND of the null masks of all columns in the table view rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr, hipStream_t stream) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; } std::vector<bitmask_type const *> masks; std::vector<size_type> offsets; for (auto &&col : view) { if (col.nullable()) { masks.push_back(col.null_mask()); offsets.push_back(col.offset()); } } if (masks.size() > 0) { return cudf::detail::bitmask_and(masks, offsets, view.num_rows(), stream, mr); } return null_mask; } } // namespace cudf
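// Illustrative sketch, not part of libcudf: the bit/word arithmetic that the kernels
// above rely on. The real helpers (word_index, intra_word_index,
// set_least_significant_bits, ...) come from cudf/utilities/bit.hpp; the definitions
// below are assumptions consistent with how this file uses them, for a 32-bit
// bitmask_type.
#include <cstdint>
#include <cstdio>

using bitmask_type = uint32_t;
constexpr int word_size = 32;

constexpr int word_index(int bit_index) { return bit_index / word_size; }
constexpr int intra_word_index(int bit_index) { return bit_index % word_size; }
// Mask with the lowest n bits set, e.g. n == 3 -> 0b111.
constexpr bitmask_type set_least_significant_bits(int n) {
  return (n >= word_size) ? ~bitmask_type{0} : ((bitmask_type{1} << n) - 1);
}

int main() {
  // For the bit range [begin_bit, end_bit) = [5, 70), the first and last words are
  // only partially covered; those partial words are the "slack bits" handled by
  // set_null_mask_kernel and count_set_bits_kernel.
  const int begin_bit = 5, end_bit = 70;
  std::printf("first word %d, last word %d\n",
              word_index(begin_bit), word_index(end_bit - 1));
  std::printf("bits below begin_bit in its word: 0x%08x\n",
              (unsigned)set_least_significant_bits(intra_word_index(begin_bit)));
  std::printf("bits kept in the last word:       0x%08x\n",
              (unsigned)set_least_significant_bits(intra_word_index(end_bit)));
  return 0;
}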
1a2b050d7f6a92ee5fa741f0c36f8e230a17e4aa.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <cub/cub.cuh> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case mask_state::UNALLOCATED: return 0; case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT; case mask_state::ALL_NULL: return size; case mask_state::ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>(number_of_bits, detail::size_in_bits<bitmask_type>()); } // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != mask_state::UNINITIALIZED) { uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00; CUDA_TRY( cudaMemsetAsync(static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream)); } return mask; } __global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination, size_type begin_bit, size_type end_bit, bool valid, size_type number_of_mask_words) { auto x = destination + word_index(begin_bit); const auto last_word = word_index(end_bit) - word_index(begin_bit); bitmask_type fill_value = (valid == true) ? 
0xffffffff : 0x00; for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { if (destination_word_index == 0 || destination_word_index == last_word) { bitmask_type mask = ~bitmask_type{0}; if (destination_word_index == 0) { mask = ~(set_least_significant_bits(intra_word_index(begin_bit))); } if (destination_word_index == last_word) { mask = mask & set_least_significant_bits(intra_word_index(end_bit)); } x[destination_word_index] = (valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask; } else { x[destination_word_index] = fill_value; } } } // Set pre-allocated null mask of given bit range [begin_bit, end_bit) // to valid, if valid==true, // or null, otherwise; void set_null_mask( bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid, cudaStream_t stream) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit < end_bit, "Invalid bit range."); if (bitmask != nullptr) { auto number_of_mask_words = num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>(); cudf::detail::grid_1d config(number_of_mask_words, 256); set_null_mask_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>( static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words); CHECK_CUDA(stream); } } namespace { /** * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. * @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range **/ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? 
set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /** * @brief Convenience function to get offset word from a bitmask * * @see copy_offset_bitmask * @see offset_bitmask_and */ __device__ bitmask_type get_mask_offset_word(bitmask_type const *__restrict__ source, size_type destination_word_index, size_type source_begin_bit, size_type source_end_bit) { size_type source_word_index = destination_word_index + word_index(source_begin_bit); bitmask_type curr_word = source[source_word_index]; bitmask_type next_word = 0; if (word_index(source_end_bit) > word_index(source_begin_bit + destination_word_index * detail::size_in_bits<bitmask_type>())) { next_word = source[source_word_index + 1]; } return __funnelshift_r(curr_word, next_word, source_begin_bit); } /** * For each range `[first_bit_indices[i], last_bit_indices[i])` * (where 0 <= i < `num_ranges`), count the number of bits set outside the range * in the boundary words (i.e. words that include either * `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and * subtract the count from the range's null count. * * Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`. * * @param[in] bitmask The bitmask whose non-zero bits outside the range in the * boundary words will be counted. * @param[in] num_ranges The number of ranges * @param[in] first_bit_indices The indices (inclusive) of the first bit in each * range * @param[in] last_bit_indices The indices (exclusive) of the last bit in each * range * @param[in,out] null_counts The number of non-zero bits in each range to be * updated */ template <typename OffsetIterator, typename OutputIterator> __global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask, size_type num_ranges, OffsetIterator first_bit_indices, OffsetIterator last_bit_indices, OutputIterator null_counts) { constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()}; cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x; cudf::size_type range_id = tid; while (range_id < num_ranges) { size_type const first_bit_index = *(first_bit_indices + range_id); size_type const last_bit_index = *(last_bit_indices + range_id); size_type delta = 0; size_type num_slack_bits = 0; // compute delta due to the preceding bits in the first word in the range num_slack_bits = intra_word_index(first_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(first_bit_index)]; bitmask_type slack_mask = set_least_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } // compute delta due to the following bits in the last word in the range num_slack_bits = (last_bit_index % word_size_in_bits) == 0 ? 0 : word_size_in_bits - intra_word_index(last_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(last_bit_index)]; bitmask_type slack_mask = set_most_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } size_type updated_null_count = *(null_counts + range_id) + delta; *(null_counts + range_id) = updated_null_count; range_id += blockDim.x * gridDim.x; } } /** * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. 
* * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. * * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done * @param number_of_mask_words The number of `cudf::bitmask_type` words to copy **/ // TODO: Also make binops test that uses offset in column_view __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { destination[destination_word_index] = get_mask_offset_word(source, destination_word_index, source_begin_bit, source_end_bit); } } /** * @brief Computes the bitwise AND of an array of bitmasks * * @param destination The bitmask to write result into * @param source Array of source mask pointers. All masks must be of same size * @param begin_bit Array of offsets into corresponding @p source masks. * Must be same size as source array * @param num_sources Number of masks in @p source array * @param source_size Number of bits in each mask in @p source * @param number_of_mask_words The number of words of type bitmask_type to copy */ __global__ void offset_bitmask_and(bitmask_type *__restrict__ destination, bitmask_type const *const *__restrict__ source, size_type const *__restrict__ begin_bit, size_type num_sources, size_type source_size, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { bitmask_type destination_word = ~bitmask_type{0}; // All bits 1 for (size_type i = 0; i < num_sources; i++) { destination_word &= get_mask_offset_word( source[i], destination_word_index, begin_bit[i], begin_bit[i] + source_size); } destination[destination_word_index] = destination_word; } } // convert [first_bit_index,last_bit_index) to // [first_word_index,last_word_index) struct to_word_index : public thrust::unary_function<size_type, size_type> { const bool _inclusive = false; size_type const *const _d_bit_indices = nullptr; /** * @brief Constructor of a functor that converts bit indices to bitmask word * indices. * * @param[in] inclusive Flag that indicates whether bit indices are inclusive * or exclusive. * @param[in] d_bit_indices Pointer to an array of bit indices */ __host__ to_word_index(bool inclusive, size_type const *d_bit_indices) : _inclusive(inclusive), _d_bit_indices(d_bit_indices) { } __device__ size_type operator()(const size_type &i) const { auto bit_index = _d_bit_indices[i]; return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 
0 : 1); } }; } // namespace namespace detail { // Inplace Bitwise AND of the masks void inplace_bitmask_and(bitmask_type *dest_mask, std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(std::all_of(begin_bits.begin(), begin_bits.end(), [](auto b) { return b >= 0; }), "Invalid range."); CUDF_EXPECTS(mask_size > 0, "Invalid bit range."); CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }), "Mask pointer cannot be null"); auto num_bytes = bitmask_allocation_size_bytes(mask_size); auto number_of_mask_words = num_bitmask_words(mask_size); rmm::device_vector<bitmask_type const *> d_masks(masks); rmm::device_vector<size_type> d_begin_bits(begin_bits); cudf::detail::grid_1d config(number_of_mask_words, 256); offset_bitmask_and<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>( dest_mask, d_masks.data().get(), d_begin_bits.data().get(), d_masks.size(), mask_size, number_of_mask_words); CHECK_CUDA(stream); } // Bitwise AND of the masks rmm::device_buffer bitmask_and(std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(mask_size); auto number_of_mask_words = num_bitmask_words(mask_size); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; inplace_bitmask_and( static_cast<bitmask_type *>(dest_mask.data()), masks, begin_bits, mask_size, stream, mr); return dest_mask; } cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, cudaStream_t stream = 0) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = num_bitmask_words(num_bits_to_count); constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_words, block_size); rmm::device_scalar<size_type> non_zero_count(0, stream); count_set_bits_kernel<block_size><<<grid.num_blocks, grid.num_threads_per_block, 0, stream>>>( bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, cudaStream_t stream = 0) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, cudaStream_t stream) { CUDF_EXPECTS(indices.size() % 2 == 0, "Array of indices needs to have an even number of elements."); for (size_t i = 0; i < indices.size() / 2; i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); } if (indices.size() == 0) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { std::vector<size_type> ret(indices.size() / 2); for (size_t i = 0; i < indices.size() / 2; i++) { ret[i] = indices[2 * i + 1] - indices[2 * i]; } return ret; } size_type num_ranges = indices.size() / 2; thrust::host_vector<size_type> h_first_indices(num_ranges); thrust::host_vector<size_type> 
h_last_indices(num_ranges); thrust::stable_partition_copy(thrust::seq, std::begin(indices), std::end(indices), thrust::make_counting_iterator(0), h_first_indices.begin(), h_last_indices.begin(), [](auto i) { return (i % 2) == 0; }); rmm::device_vector<size_type> d_first_indices = h_first_indices; rmm::device_vector<size_type> d_last_indices = h_last_indices; rmm::device_vector<size_type> d_null_counts(num_ranges, 0); auto word_num_set_bits = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); }); auto first_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(true, d_first_indices.data().get())); auto last_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(false, d_last_indices.data().get())); // first allocate temporary memroy size_t temp_storage_bytes{0}; CUDA_TRY(cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream)); rmm::device_buffer d_temp_storage(temp_storage_bytes, stream); // second perform segmented reduction CUDA_TRY(cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(), temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream)); CHECK_CUDA(stream); // third, adjust counts in segment boundaries (if segments are not // word-aligned) constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_ranges, block_size); subtract_set_bits_range_boundaries_kerenel<<<grid.num_blocks, grid.num_threads_per_block, 0, stream>>>( bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin()); CHECK_CUDA(stream); std::vector<size_type> ret(num_ranges); CUDA_TRY(cudaMemcpyAsync(ret.data(), d_null_counts.data().get(), num_ranges * sizeof(size_type), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); // now ret is valid. 
return ret; } std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, cudaStream_t stream) { if (indices.size() == 0) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { return std::vector<size_type>(indices.size() / 2, 0); } auto ret = segmented_count_set_bits(bitmask, indices, stream); for (size_t i = 0; i < ret.size(); i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; ret[i] = (end - begin) - ret[i]; } return ret; } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_unset_bits(bitmask, start, stop); } // Count non-zero bits in the specified ranges std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_set_bits(bitmask, indices, 0); } // Count zero bits in the specified ranges std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_unset_bits(bitmask, indices, 0); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::detail::grid_1d config(number_of_mask_words, 256); copy_offset_bitmask<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>( static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CHECK_CUDA(stream); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer null_mask{0, stream, mr}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Returns the bitwise AND of the null masks of all columns in the table view rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr, cudaStream_t stream) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; } std::vector<bitmask_type const *> masks; std::vector<size_type> offsets; for (auto &&col : view) { if (col.nullable()) { masks.push_back(col.null_mask()); offsets.push_back(col.offset()); } } if (masks.size() > 0) { return cudf::detail::bitmask_and(masks, offsets, view.num_rows(), stream, mr); } return null_mask; } } // namespace cudf
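// Hypothetical caller of the public entry points defined above, to show the shape of
// the segmented APIs: `indices` is a flat list of [begin, end) pairs. This assumes a
// 2019-era libcudf where these functions are declared in <cudf/null_mask.hpp>, that
// the translation unit links against libcudf, and that `mask` is a valid device
// bitmask covering at least 200 rows.
#include <cudf/null_mask.hpp>
#include <vector>

std::vector<cudf::size_type> null_counts_per_segment(cudf::bitmask_type const* mask) {
  // Two segments: rows [0, 100) and rows [100, 200).
  std::vector<cudf::size_type> indices{0, 100, 100, 200};
  auto valid_counts = cudf::segmented_count_set_bits(mask, indices);    // set (valid) bits
  auto null_counts  = cudf::segmented_count_unset_bits(mask, indices);  // unset (null) bits
  (void)valid_counts;  // each segment's valid + null counts sum to its length
  return null_counts;
}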
f1d60bb67c3a8cb87bfc46d2291660f7ac13d946.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHReduceApplyUtils.cuh"
#include <THH/THHApply.cuh>
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"

template <typename Dtype>
__global__ void TemporalReplicationPadding_updateOutput(
  THCDeviceTensor<Dtype, 3> input,
  THCDeviceTensor<Dtype, 3> output,
  int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= output.getSize(2)) {
    return;
  }
  int outputPointX = outputPointId % output.getSize(2);

  int iStartX = max(0, -padL);
  int oStartX = max(0, padL);

  int inputPointX = min(max(padL, outputPointX), input.getSize(2) + padL - 1) - oStartX + iStartX;

  Dtype valueToCopy = input[batch][plane][inputPointX];
  output[batch][plane][outputPointX] = valueToCopy;
}

template <typename Dtype>
__global__ void TemporalReplicationPadding_updateGradInput(
  THCDeviceTensor<Dtype, 3> gradInput,
  THCDeviceTensor<Dtype, 3> gradOutput,
  int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= gradOutput.getSize(2)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.getSize(2);

  int iStartX = max(0, -padL);
  int oStartX = max(0, padL);

  int inputPointX = min(max(padL, outputPointX), gradInput.getSize(2) + padL - 1) - oStartX + iStartX;

  Dtype valueToCopy = gradOutput[batch][plane][outputPointX];
  atomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy);
}

#include "generic/TemporalReplicationPadding.cu"
#include "THHGenerateFloatTypes.h"
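// Illustrative CPU sketch, not part of THCUNN: the index clamp used by both kernels
// above, which implements replication ("edge") padding by mapping every padded output
// column to the nearest valid input column. The pad widths and input size below are
// arbitrary example values.
#include <algorithm>
#include <cstdio>

int main() {
  const int padL = 2, padR = 3, inputW = 5;
  const int outputW = inputW + padL + padR;
  const int iStartX = std::max(0, -padL);  // first readable input column
  const int oStartX = std::max(0, padL);   // where the unpadded region starts in the output
  for (int outX = 0; outX < outputW; ++outX) {
    const int inX = std::min(std::max(padL, outX), inputW + padL - 1) - oStartX + iStartX;
    std::printf("output %d <- input %d\n", outX, inX);  // left edge repeats 0, right edge repeats 4
  }
  return 0;
}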
f1d60bb67c3a8cb87bfc46d2291660f7ac13d946.cu
#include "THCUNN.h"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCReduceApplyUtils.cuh"
#include <THC/THCApply.cuh>
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"

template <typename Dtype>
__global__ void TemporalReplicationPadding_updateOutput(
  THCDeviceTensor<Dtype, 3> input,
  THCDeviceTensor<Dtype, 3> output,
  int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= output.getSize(2)) {
    return;
  }
  int outputPointX = outputPointId % output.getSize(2);

  int iStartX = max(0, -padL);
  int oStartX = max(0, padL);

  int inputPointX = min(max(padL, outputPointX), input.getSize(2) + padL - 1) - oStartX + iStartX;

  Dtype valueToCopy = input[batch][plane][inputPointX];
  output[batch][plane][outputPointX] = valueToCopy;
}

template <typename Dtype>
__global__ void TemporalReplicationPadding_updateGradInput(
  THCDeviceTensor<Dtype, 3> gradInput,
  THCDeviceTensor<Dtype, 3> gradOutput,
  int padL, int padR) {

  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  if (outputPointId >= gradOutput.getSize(2)) {
    return;
  }
  int outputPointX = outputPointId % gradOutput.getSize(2);

  int iStartX = max(0, -padL);
  int oStartX = max(0, padL);

  int inputPointX = min(max(padL, outputPointX), gradInput.getSize(2) + padL - 1) - oStartX + iStartX;

  Dtype valueToCopy = gradOutput[batch][plane][outputPointX];
  atomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy);
}

#include "generic/TemporalReplicationPadding.cu"
#include "THCGenerateFloatTypes.h"
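// Illustrative CPU sketch, not part of THCUNN: why the backward kernel above has to
// accumulate with atomicAdd. Several padded output columns map to the same input
// column at the edges, so their gradients must be summed; on the GPU those additions
// can race across threads, hence the atomic. Sizes below are arbitrary example values.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const int padL = 2, padR = 2, inputW = 4;
  const int outputW = inputW + padL + padR;
  const int iStartX = std::max(0, -padL);
  const int oStartX = std::max(0, padL);
  std::vector<float> gradOutput(outputW, 1.0f);  // pretend every output gradient is 1
  std::vector<float> gradInput(inputW, 0.0f);
  for (int outX = 0; outX < outputW; ++outX) {
    const int inX = std::min(std::max(padL, outX), inputW + padL - 1) - oStartX + iStartX;
    gradInput[inX] += gradOutput[outX];  // the CUDA kernel performs this add atomically
  }
  for (int i = 0; i < inputW; ++i)
    std::printf("gradInput[%d] = %.0f\n", i, gradInput[i]);  // edges get 3, interior gets 1
  return 0;
}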
6f272fe2180d5d48108564d47535b74d7be4936c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file spatial_transformer.cu * \brief * \author Wei Wu */ #include "./spatial_transformer-inl.h" #include <algorithm> #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 #include "./cudnn_spatial_transformer-inl.h" #endif // MXNET_USE_CUDNN && CUDNN_MAJOR namespace mshadow { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void BilinearSamplingForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t grid_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_right_v = *(data + data_index + i_w + 1); *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); } } template<typename DType> __global__ void BilinearSamplingBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * 
o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType top_left_y_gw = 0.0; DType top_left_x_gw = 0.0; index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; // calc 4 vertex value in input data DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { *(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w; top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { *(g_input + data_index + 1) += *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w); top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { *(g_input + data_index+ i_w) += *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w; bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { *(g_input + data_index+ i_w + 1) += *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w); top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } // calc grid_src grad *(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2; *(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2; } } template<typename DType> inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 3, DType> grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward"); hipStream_t stream = Stream<gpu>::GetStream(output.stream_); BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); } template<typename DType> inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 3, DType> &grid_src_data, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data) { DType *g_input = 
input_grad.dptr_; DType *grid_src = grid_src_data.dptr_; const DType *grad = output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward"); hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) { Operator *op = NULL; #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new CuDNNSpatialTransformerOp<DType>(param); }) #else MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); }) #endif // MXNET_USE_CUDNN && CUDNN_MAJOR return op; } } // namespace op } // namespace mxnet
6f272fe2180d5d48108564d47535b74d7be4936c.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file spatial_transformer.cu * \brief * \author Wei Wu */ #include "./spatial_transformer-inl.h" #include <algorithm> #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 #include "./cudnn_spatial_transformer-inl.h" #endif // MXNET_USE_CUDNN && CUDNN_MAJOR namespace mshadow { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void BilinearSamplingForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t grid_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_right_v = *(data + data_index + i_w + 1); *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); } } template<typename DType> __global__ void BilinearSamplingBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element 
in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType top_left_y_gw = 0.0; DType top_left_x_gw = 0.0; index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; // calc 4 vertex value in input data DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { *(g_input + data_index) += *(grad + grad_index) * top_left_y_w * top_left_x_w; top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { *(g_input + data_index + 1) += *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w); top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { *(g_input + data_index+ i_w) += *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w; bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { *(g_input + data_index+ i_w + 1) += *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w); top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } // calc grid_src grad *(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2; *(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2; } } template<typename DType> inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 3, DType> grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward"); cudaStream_t stream = Stream<gpu>::GetStream(output.stream_); BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); } template<typename DType> inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 3, DType> &grid_src_data, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data) { DType *g_input = input_grad.dptr_; DType *grid_src = grid_src_data.dptr_; const DType *grad = 
output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward"); cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) { Operator *op = NULL; #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new CuDNNSpatialTransformerOp<DType>(param); }) #else MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); }) #endif // MXNET_USE_CUDNN && CUDNN_MAJOR return op; } } // namespace op } // namespace mxnet
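For reference, the bilinear weighting used by BilinearSamplingForwardKernel above can be checked on the host. The following is a minimal sketch assuming a single-channel, row-major image and illustrative names (bilinear_sample is not part of MXNet); it mirrors the kernel's mapping of a normalized grid coordinate in [-1, 1] to pixel space and its four-corner weighting, with out-of-range corners treated as zero.

#include <cmath>
#include <cstdio>
#include <vector>

static bool between(float v, int lo, int hi) { return v >= lo && v <= hi; }

// data: one i_h x i_w channel, row-major. Samples at normalized point (gx, gy).
float bilinear_sample(const std::vector<float>& data, int i_h, int i_w,
                      float gx, float gy) {
  // map [-1, 1] -> pixel coordinates, exactly as the kernel does
  float x_real = (gx + 1.f) * (i_w - 1) / 2.f;
  float y_real = (gy + 1.f) * (i_h - 1) / 2.f;
  int tlx = static_cast<int>(std::floor(x_real));
  int tly = static_cast<int>(std::floor(y_real));
  float wx = 1.f - (x_real - tlx);  // weight of the left column
  float wy = 1.f - (y_real - tly);  // weight of the top row
  auto at = [&](int y, int x) {
    return (between(x, 0, i_w - 1) && between(y, 0, i_h - 1)) ? data[y * i_w + x] : 0.f;
  };
  return at(tly, tlx) * wy * wx + at(tly, tlx + 1) * wy * (1.f - wx) +
         at(tly + 1, tlx) * (1.f - wy) * wx +
         at(tly + 1, tlx + 1) * (1.f - wy) * (1.f - wx);
}

int main() {
  std::vector<float> img = {0, 1, 2, 3};                 // 2x2 image
  printf("%f\n", bilinear_sample(img, 2, 2, 0.f, 0.f));  // centre -> 1.5
  return 0;
}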
6455bcc61ab9df01abadbae4d1a3e0d2960f51b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zlaswp.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } dlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswp_kernel( int n, double *dAT, int ldda, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in dgessm, dgetrf_incpiv. 
extern "C" void magmablas_dlaswp( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( dlaswp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswpx_kernel( int n, double *dA, int ldx, int ldy, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; double *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dA + i2*ldx; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA DOUBLE PRECISION array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_dlaswpx( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( dlaswpx_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_dlaswp // (including copying pivots to the GPU). __global__ void dlaswp2_kernel( int n, double *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_dlaswp2( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); hipLaunchKernelGGL(( dlaswp2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
6455bcc61ab9df01abadbae4d1a3e0d2960f51b2.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zlaswp.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } dlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswp_kernel( int n, double *dAT, int ldda, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in dgessm, dgetrf_incpiv. 
extern "C" void magmablas_dlaswp( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } dlaswp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswpx_kernel( int n, double *dA, int ldx, int ldy, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; double *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dA + i2*ldx; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA DOUBLE PRECISION array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_dlaswpx( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } dlaswpx_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_dlaswp // (including copying pivots to the GPU). __global__ void dlaswp2_kernel( int n, double *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= DLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_dlaswp2( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); dlaswp2_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
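The pivot handling above converts LAPACK-style 1-based ipiv entries into offsets relative to the current row k (params.ipiv[j] = ipiv[(k+j)*inci] - k - 1) before each kernel launch. A small host-only sketch of the same row-interchange semantics, useful as a CPU reference when checking the GPU result (names such as laswp_cpu are illustrative, not MAGMA API):

#include <cstdio>
#include <utility>
#include <vector>

// Apply ipiv (1-based, as in LAPACK) to a column-major m x n matrix on the CPU,
// swapping whole rows -- the same operation dlaswp_kernel performs on dAT.
void laswp_cpu(int m, int n, std::vector<double>& A, int lda,
               int k1, int k2, const std::vector<int>& ipiv, int inci) {
  for (int k = k1 - 1; k < k2; ++k) {
    int i2 = ipiv[k * inci] - 1;  // convert to 0-based absolute row
    if (i2 == k) continue;
    for (int j = 0; j < n; ++j) {
      std::swap(A[k + j * lda], A[i2 + j * lda]);
    }
  }
}

int main() {
  // 3x2 column-major matrix; ipiv says: swap rows 1 and 3 (Fortran indices)
  std::vector<double> A = {1, 2, 3, 4, 5, 6};
  std::vector<int> ipiv = {3, 2, 3};
  laswp_cpu(3, 2, A, 3, 1, 3, ipiv, 1);
  for (double v : A) printf("%g ", v);  // expect: 3 2 1 6 5 4
  printf("\n");
  return 0;
}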
5e373e13a77118cf57f504863e5ffd76ebbdb874.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <raft/cudart_utils.h>
#include <limits>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include "../common/ml_benchmark.hpp"

namespace MLCommon {
namespace Bench {
namespace Distance {

struct FLNParams {
  int m, n, k;
};  // struct FLNParams

template <typename T>
struct FusedL2NN : public Fixture {
  FusedL2NN(const std::string& name, const FLNParams& p)
    : Fixture(name, std::shared_ptr<deviceAllocator>(
                      new raft::mr::device::default_allocator)),
      params(p) {}

 protected:
  void allocateBuffers(const ::benchmark::State& state) override {
    alloc(x, params.m * params.k);
    alloc(y, params.n * params.k);
    alloc(xn, params.m);
    alloc(yn, params.n);
    alloc(out, params.m);
    alloc(workspace, params.m);
    raft::random::Rng r(123456ULL);
    r.uniform(x, params.m * params.k, T(-1.0), T(1.0), stream);
    r.uniform(y, params.n * params.k, T(-1.0), T(1.0), stream);
    raft::linalg::rowNorm(xn, x, params.k, params.m, raft::linalg::L2Norm, true,
                          stream);
    raft::linalg::rowNorm(yn, y, params.k, params.n, raft::linalg::L2Norm, true,
                          stream);
    auto blks = raft::ceildiv(params.m, 256);
    hipLaunchKernelGGL((raft::distance::initKernel<T, hipcub::KeyValuePair<int, T>, int>),
                       dim3(blks), dim3(256), 0, stream,
                       out, params.m, std::numeric_limits<T>::max(), op);
  }

  void deallocateBuffers(const ::benchmark::State& state) override {
    dealloc(x, params.m * params.k);
    dealloc(y, params.n * params.k);
    dealloc(xn, params.m);
    dealloc(yn, params.n);
    dealloc(out, params.m);
    dealloc(workspace, params.m);
  }

  void runBenchmark(::benchmark::State& state) override {
    loopOnState(state, [this]() {
      // it is enough to only benchmark the L2-squared metric
      raft::distance::fusedL2NN<T, hipcub::KeyValuePair<int, T>, int>(
        out, x, y, xn, yn, params.m, params.n, params.k, (void*)workspace, op,
        pairRedOp, false, false, stream);
    });
  }

 private:
  FLNParams params;
  T *x, *y, *xn, *yn;
  hipcub::KeyValuePair<int, T>* out;
  int* workspace;
  raft::distance::KVPMinReduce<int, T> pairRedOp;
  raft::distance::MinAndDistanceReduceOp<int, T> op;
};  // struct FusedL2NN

static std::vector<FLNParams> getInputs() {
  return {
    {32, 16384, 16384},   {64, 16384, 16384},   {128, 16384, 16384},
    {256, 16384, 16384},  {512, 16384, 16384},  {1024, 16384, 16384},
    {16384, 32, 16384},   {16384, 64, 16384},   {16384, 128, 16384},
    {16384, 256, 16384},  {16384, 512, 16384},  {16384, 1024, 16384},
    {16384, 16384, 32},   {16384, 16384, 64},   {16384, 16384, 128},
    {16384, 16384, 256},  {16384, 16384, 512},  {16384, 16384, 1024},
    {16384, 16384, 16384},
  };
}

ML_BENCH_REGISTER(FLNParams, FusedL2NN<float>, "", getInputs());
ML_BENCH_REGISTER(FLNParams, FusedL2NN<double>, "", getInputs());

}  // namespace Distance
}  // namespace Bench
}  // namespace MLCommon
5e373e13a77118cf57f504863e5ffd76ebbdb874.cu
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <raft/cudart_utils.h>
#include <limits>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include "../common/ml_benchmark.hpp"

namespace MLCommon {
namespace Bench {
namespace Distance {

struct FLNParams {
  int m, n, k;
};  // struct FLNParams

template <typename T>
struct FusedL2NN : public Fixture {
  FusedL2NN(const std::string& name, const FLNParams& p)
    : Fixture(name, std::shared_ptr<deviceAllocator>(
                      new raft::mr::device::default_allocator)),
      params(p) {}

 protected:
  void allocateBuffers(const ::benchmark::State& state) override {
    alloc(x, params.m * params.k);
    alloc(y, params.n * params.k);
    alloc(xn, params.m);
    alloc(yn, params.n);
    alloc(out, params.m);
    alloc(workspace, params.m);
    raft::random::Rng r(123456ULL);
    r.uniform(x, params.m * params.k, T(-1.0), T(1.0), stream);
    r.uniform(y, params.n * params.k, T(-1.0), T(1.0), stream);
    raft::linalg::rowNorm(xn, x, params.k, params.m, raft::linalg::L2Norm, true,
                          stream);
    raft::linalg::rowNorm(yn, y, params.k, params.n, raft::linalg::L2Norm, true,
                          stream);
    auto blks = raft::ceildiv(params.m, 256);
    raft::distance::initKernel<T, cub::KeyValuePair<int, T>, int>
      <<<blks, 256, 0, stream>>>(out, params.m, std::numeric_limits<T>::max(), op);
  }

  void deallocateBuffers(const ::benchmark::State& state) override {
    dealloc(x, params.m * params.k);
    dealloc(y, params.n * params.k);
    dealloc(xn, params.m);
    dealloc(yn, params.n);
    dealloc(out, params.m);
    dealloc(workspace, params.m);
  }

  void runBenchmark(::benchmark::State& state) override {
    loopOnState(state, [this]() {
      // it is enough to only benchmark the L2-squared metric
      raft::distance::fusedL2NN<T, cub::KeyValuePair<int, T>, int>(
        out, x, y, xn, yn, params.m, params.n, params.k, (void*)workspace, op,
        pairRedOp, false, false, stream);
    });
  }

 private:
  FLNParams params;
  T *x, *y, *xn, *yn;
  cub::KeyValuePair<int, T>* out;
  int* workspace;
  raft::distance::KVPMinReduce<int, T> pairRedOp;
  raft::distance::MinAndDistanceReduceOp<int, T> op;
};  // struct FusedL2NN

static std::vector<FLNParams> getInputs() {
  return {
    {32, 16384, 16384},   {64, 16384, 16384},   {128, 16384, 16384},
    {256, 16384, 16384},  {512, 16384, 16384},  {1024, 16384, 16384},
    {16384, 32, 16384},   {16384, 64, 16384},   {16384, 128, 16384},
    {16384, 256, 16384},  {16384, 512, 16384},  {16384, 1024, 16384},
    {16384, 16384, 32},   {16384, 16384, 64},   {16384, 16384, 128},
    {16384, 16384, 256},  {16384, 16384, 512},  {16384, 16384, 1024},
    {16384, 16384, 16384},
  };
}

ML_BENCH_REGISTER(FLNParams, FusedL2NN<float>, "", getInputs());
ML_BENCH_REGISTER(FLNParams, FusedL2NN<double>, "", getInputs());

}  // namespace Distance
}  // namespace Bench
}  // namespace MLCommon
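The benchmark precomputes the row norms xn and yn because a fused L2 nearest-neighbor kernel can use the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2*dot(x, y) and reuse the norms across all pairs. A host-side sketch of that identity (illustrative only, not the raft implementation):

#include <cstdio>

int main() {
  const int k = 3;
  float x[k] = {1.f, 2.f, 3.f};
  float y[k] = {4.f, 0.f, -1.f};
  float xn = 0.f, yn = 0.f, dot = 0.f, direct = 0.f;
  for (int i = 0; i < k; ++i) {
    xn += x[i] * x[i];        // ||x||^2, the role of the precomputed xn
    yn += y[i] * y[i];        // ||y||^2, the role of the precomputed yn
    dot += x[i] * y[i];       // the only per-pair term left for the kernel
    direct += (x[i] - y[i]) * (x[i] - y[i]);
  }
  printf("expanded=%f direct=%f\n", xn + yn - 2.f * dot, direct);  // both 29
  return 0;
}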
b8dd85f90fd208143bb94b21f4fa35bde10e918b.hip
// !!! This is a file automatically generated by hipify!!!
/*
** Ben Pittman
** Greg Smith
** Calvin Winston Fei
** Term Project - board.cpp
** Static class for checking solutions.
** Assumptions: Assumes valid board size and 1D memory allocation
*/

#include <stdio.h>
#include <iostream>
#include <set>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

const int BOARD_SIZE = 81;
const int SUB_BOARD_SIZE = 9;

class Board {
public:
    // Array of bool pointers to hold cells for board
    // 0 item in each array signifies filled or empty, 1-9 signifies filled value or potential value
    bool **board[BOARD_SIZE];

    __host__ Board() {
        for (int i = 0; i < BOARD_SIZE; i++) {
            *board[i] = (bool*)malloc((SUB_BOARD_SIZE + 1) * sizeof(bool));
        }
    };

    // Method to set the board according to passed integer array
    // assumes the filled integer array is of size BOARD_SIZE contains only values between 1 and 9
    __host__ void set_board(int* filled) {
        for (int i = 0; i < BOARD_SIZE; i++) {
            if (filled[i] != 0) {
                *board[i][0] = true;
                *board[i][filled[i]] = true;
            }
        }
    }

    // sets a cell as
    __device__ void set_cell(int _row, int _col, int _val) {
        int board_cell = _row + _col * SUB_BOARD_SIZE;
        *board[board_cell][0] = true;
        for (int i = 1; i < SUB_BOARD_SIZE + 1; i++) {
            if (*board[board_cell][i] == true && i != _val) {
                *board[board_cell][0] = false;
            }
        }
    }

    // method for finding potential values for empty cells
    __device__ void annotate_potential_entries(int row, int col) {
        // scan row for values and store in temp set
        std::set<int> row_vals;
        std::set<int> col_vals;

        // find filled cells in the row and add to array
        for (int i = row * SUB_BOARD_SIZE; i < (row * SUB_BOARD_SIZE) + SUB_BOARD_SIZE; i++) {
            if (*board[i][0]) {
                for (int j = 1; j < SUB_BOARD_SIZE; j++) {
                    if (*board[i][j]) {
                        row_vals.insert(j);
                    }
                }
            }
        }

        // scan col for values and store in temp set
        for (int i = col; i < col * SUB_BOARD_SIZE - col; i += SUB_BOARD_SIZE) {
            if (*board[i][0]) {
                for (int j = 1; j < SUB_BOARD_SIZE; j++) {
                    if (*board[i][j]) {
                        col_vals.insert(j);
                    }
                }
            }
        }

        // Fill potential entries in rows
        if (!row_vals.empty()) {
            for (int i = row * SUB_BOARD_SIZE; i < (row * SUB_BOARD_SIZE) + SUB_BOARD_SIZE; i++) {
                if (!*board[i][0]) {
                    for (auto it = row_vals.begin(); it != row_vals.end(); ++it) {
                        *board[i][*it] = true;
                    }
                }
            }
        }

        // Fill potential entries in columns
        if (!col_vals.empty()) {
            for (int i = col; i < col * BOARD_SIZE - col; i += SUB_BOARD_SIZE) {
                if (*board[i][0]) {
                    for (auto it = col_vals.begin(); it != col_vals.end(); ++it) {
                        *board[i][*it] = true;
                    }
                }
            }
        }
    }

    // Prints out the passed in sudoku game board
    // Assumes N is either 4, 9 or 16 but can be extended to add more sizes
    __host__ void print_board(int *sudoku) {
        char* border;
        if (SUB_BOARD_SIZE == 4) {
            border = new char[14]{ "|-----+-----|" };
        }
        else if (SUB_BOARD_SIZE == 9) {
            border = new char[26]{ "|-------+-------+-------|" };
        }
        else if (SUB_BOARD_SIZE == 16) {
            border = new char[42]{ "|---------+---------+---------+---------|" };
        }
        else {
            return;
        }

        std::cout << border << std::endl;
        int split = sqrt(SUB_BOARD_SIZE);
        for (int i = 0; i < SUB_BOARD_SIZE*SUB_BOARD_SIZE; i++) {
            if (i % SUB_BOARD_SIZE == 0) {
                std::cout << "| ";
            }
            else if (i % split == 0) {
                std::cout << "| ";
            }

            // change to call a get_entry function that will return the value
            int value = sudoku[i];
            if (value != 0) {
                std::cout << value << " ";
            }
            else {
                std::cout << ". ";
            }

            if (i % SUB_BOARD_SIZE == SUB_BOARD_SIZE - 1) {
                std::cout << "|" << std::endl;
                if (((i + 1) % (SUB_BOARD_SIZE * SUB_BOARD_SIZE / split)) == 0) {
                    std::cout << border << std::endl;
                }
            }
        }
        std::cout << std::endl;
    }
};
b8dd85f90fd208143bb94b21f4fa35bde10e918b.cu
/* ** Ben Pittman ** Greg Smith ** Calvin Winston Fei ** Term Project - board.cpp ** Static class for checking solutions. ** Assumptions: Assumes valid board size and 1D memory allocation */ #include <stdio.h> #include <iostream> #include <set> #include "cuda_runtime.h" #include "device_launch_parameters.h" const int BOARD_SIZE = 81; const int SUB_BOARD_SIZE = 9; class Board { public: // Array of bool pointers to hold cells for board // 0 item in each array signifies filled or empty, 1-9 signifies filled value or potential value bool **board[BOARD_SIZE]; __host__ Board() { for (int i = 0; i < BOARD_SIZE; i++) { *board[i] = (bool*)malloc((SUB_BOARD_SIZE + 1) * sizeof(bool)); } }; // Method to set the board according to passed integer array // assumes the filled integer array is of size BOARD_SIZE contains only values between 1 and 9 __host__ void set_board(int* filled) { for (int i = 0; i < BOARD_SIZE; i++) { if (filled[i] != 0) { *board[i][0] = true; *board[i][filled[i]] = true; } } } // sets a cell as __device__ void set_cell(int _row, int _col, int _val) { int board_cell = _row + _col * SUB_BOARD_SIZE; *board[board_cell][0] = true; for (int i = 1; i < SUB_BOARD_SIZE + 1; i++) { if (*board[board_cell][i] == true && i != _val) { *board[board_cell][0] = false; } } } // method for finding potential values for empty cells __device__ void annotate_potential_entries(int row, int col) { // scan row for values and store in temp set std::set<int> row_vals; std::set<int> col_vals; // find filled cells in the row and add to array for (int i = row * SUB_BOARD_SIZE; i < (row * SUB_BOARD_SIZE) + SUB_BOARD_SIZE; i++) { if (*board[i][0]) { for (int j = 1; j < SUB_BOARD_SIZE; j++) { if (*board[i][j]) { row_vals.insert(j); } } } } // scan col for values and store in temp set for (int i = col; i < col * SUB_BOARD_SIZE - col; i += SUB_BOARD_SIZE) { if (*board[i][0]) { for (int j = 1; j < SUB_BOARD_SIZE; j++) { if (*board[i][j]) { col_vals.insert(j); } } } } // Fill potential entries in rows if (!row_vals.empty()) { for (int i = row * SUB_BOARD_SIZE; i < (row * SUB_BOARD_SIZE) + SUB_BOARD_SIZE; i++) { if (!*board[i][0]) { for (auto it = row_vals.begin(); it != row_vals.end(); ++it) { *board[i][*it] = true; } } } } // Fill potential entries in columns if (!col_vals.empty()) { for (int i = col; i < col * BOARD_SIZE - col; i += SUB_BOARD_SIZE) { if (*board[i][0]) { for (auto it = col_vals.begin(); it != col_vals.end(); ++it) { *board[i][*it] = true; } } } } } // Prints out the passed in sudoku game board // Assumes N is either 4, 9 or 16 but can be extended to add more sizes __host__ void print_board(int *sudoku) { char* border; if (SUB_BOARD_SIZE == 4) { border = new char[14]{ "|-----+-----|" }; } else if (SUB_BOARD_SIZE == 9) { border = new char[26]{ "|-------+-------+-------|" }; } else if (SUB_BOARD_SIZE == 16) { border = new char[42]{ "|---------+---------+---------+---------|" }; } else { return; } std::cout << border << std::endl; int split = sqrt(SUB_BOARD_SIZE); for (int i = 0; i < SUB_BOARD_SIZE*SUB_BOARD_SIZE; i++) { if (i % SUB_BOARD_SIZE == 0) { std::cout << "| "; } else if (i % split == 0) { std::cout << "| "; } // change to call a get_entry fucntion that will return the value int value = sudoku[i]; if (value != 0) { std::cout << value << " "; } else { std::cout << ". "; } if (i % SUB_BOARD_SIZE == SUB_BOARD_SIZE - 1) { std::cout << "|" << std::endl; if (((i + 1) % (SUB_BOARD_SIZE * SUB_BOARD_SIZE / split)) == 0) { std::cout << border << std::endl; } } } std::cout << std::endl; } };
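Note that, as written, `bool **board[BOARD_SIZE]` declares an array of uninitialized `bool**`, so the constructor's `*board[i] = (bool*)malloc(...)` writes through indeterminate pointers. A minimal host-only sketch of one possible corrected layout (hypothetical name BoardFixed, same index-0 "filled" flag convention; illustrative, not the project's actual design):

#include <cstdlib>

const int BOARD_SIZE_ = 81;
const int SUB_BOARD_SIZE_ = 9;

class BoardFixed {
public:
    bool* board[BOARD_SIZE_];  // one flag array per cell

    BoardFixed() {
        for (int i = 0; i < BOARD_SIZE_; i++) {
            // index 0 = "cell is filled", 1..9 = value / candidate flags
            board[i] = (bool*)calloc(SUB_BOARD_SIZE_ + 1, sizeof(bool));
        }
    }

    ~BoardFixed() {
        for (int i = 0; i < BOARD_SIZE_; i++) free(board[i]);
    }

    void set_board(const int* filled) {
        for (int i = 0; i < BOARD_SIZE_; i++) {
            if (filled[i] != 0) {
                board[i][0] = true;
                board[i][filled[i]] = true;
            }
        }
    }
};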
5fcb85d2958ba13e9ea3518df5580cb129a3dacf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright (c) 2016-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Modifications Copyright (c) Microsoft. */

#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/nn/dropout_impl.h"
#include <hiprand/hiprand_kernel.h>
#include <algorithm>

namespace onnxruntime {
namespace cuda {

template <typename T>
__global__ void DropoutGradientKernel(
    const int64_t N,
    const T* dY_data,
    const bool* mask_data,
    const T scale,
    T* dX_data) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  dX_data[id] = dY_data[id] * T(mask_data[id]) * scale;
}

template <typename T>
void DropoutGradientKernelImpl(
    const int64_t N,
    const T* dY_data,
    const bool* mask_data,
    const float ratio,
    T* dX_data) {
  if (ratio == 0.0f) {
    if (dY_data != dX_data) {
      CUDA_CALL_THROW(hipMemcpyAsync(dX_data, dY_data, N * sizeof(T), hipMemcpyDeviceToDevice));
    }
  } else {
    const float scale = 1.f / (1.f - ratio);
    const int blocksPerGrid = (N + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock;
    hipLaunchKernelGGL((DropoutGradientKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
                       N, dY_data, mask_data, T(scale), dX_data);
  }
}

#define SPECIALIZED_DROPOUT_GRAD_IMPL(T)   \
  template void DropoutGradientKernelImpl( \
      const int64_t N,                     \
      const T* dY_data,                    \
      const bool* mask_data,               \
      const float scale,                   \
      T* dX_data);

SPECIALIZED_DROPOUT_GRAD_IMPL(float)
SPECIALIZED_DROPOUT_GRAD_IMPL(double)
SPECIALIZED_DROPOUT_GRAD_IMPL(half)

}  // namespace cuda
}  // namespace onnxruntime
5fcb85d2958ba13e9ea3518df5580cb129a3dacf.cu
/**
 * Copyright (c) 2016-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Modifications Copyright (c) Microsoft. */

#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/nn/dropout_impl.h"
#include <curand_kernel.h>
#include <algorithm>

namespace onnxruntime {
namespace cuda {

template <typename T>
__global__ void DropoutGradientKernel(
    const int64_t N,
    const T* dY_data,
    const bool* mask_data,
    const T scale,
    T* dX_data) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  dX_data[id] = dY_data[id] * T(mask_data[id]) * scale;
}

template <typename T>
void DropoutGradientKernelImpl(
    const int64_t N,
    const T* dY_data,
    const bool* mask_data,
    const float ratio,
    T* dX_data) {
  if (ratio == 0.0f) {
    if (dY_data != dX_data) {
      CUDA_CALL_THROW(cudaMemcpyAsync(dX_data, dY_data, N * sizeof(T), cudaMemcpyDeviceToDevice));
    }
  } else {
    const float scale = 1.f / (1.f - ratio);
    const int blocksPerGrid = (N + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock;
    DropoutGradientKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
        N, dY_data, mask_data, T(scale), dX_data);
  }
}

#define SPECIALIZED_DROPOUT_GRAD_IMPL(T)   \
  template void DropoutGradientKernelImpl( \
      const int64_t N,                     \
      const T* dY_data,                    \
      const bool* mask_data,               \
      const float scale,                   \
      T* dX_data);

SPECIALIZED_DROPOUT_GRAD_IMPL(float)
SPECIALIZED_DROPOUT_GRAD_IMPL(double)
SPECIALIZED_DROPOUT_GRAD_IMPL(half)

}  // namespace cuda
}  // namespace onnxruntime
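Per element, the kernel above passes the gradient through only where the mask kept the activation and rescales it by 1 / (1 - ratio), matching an inverted-dropout forward pass; ratio == 0 degenerates to a plain copy. A host-side sketch of that arithmetic (names are illustrative, not onnxruntime API):

#include <cstdio>

int main() {
  const float ratio = 0.25f;
  const float scale = 1.f / (1.f - ratio);
  const float dY[4] = {1.f, 2.f, 3.f, 4.f};
  const bool mask[4] = {true, false, true, true};
  float dX[4];
  for (int i = 0; i < 4; ++i) {
    // same formula as DropoutGradientKernel: dX = dY * mask * scale
    dX[i] = dY[i] * (mask[i] ? 1.f : 0.f) * scale;
  }
  for (int i = 0; i < 4; ++i) printf("%g ", dX[i]);  // 1.33333 0 4 5.33333
  printf("\n");
  return 0;
}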
7cab030930ab404fd286f7cb18bc4b6029ab10d9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

__global__ void dkernel(){
    printf("Hi from thread id %d\n", threadIdx.x);
}

int main() {
    hipLaunchKernelGGL(( dkernel), dim3(1),dim3(32), 0, 0, );
    hipDeviceSynchronize();
    return 0;
}
7cab030930ab404fd286f7cb18bc4b6029ab10d9.cu
#include <stdio.h> #include <cuda.h> __global__ void dkernel(){ printf("Hi from thread id %d\n", threadIdx.x); } int main() { dkernel<<<1,32>>>(); cudaDeviceSynchronize(); return 0; }
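A variant of the same toy program with basic error checking added after the launch; the checks below use standard CUDA runtime calls, and the kernel is renamed dkernel2 only to keep this sketch separate from the file above:

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void dkernel2() {
    printf("Hi from thread id %d\n", threadIdx.x);
}

int main() {
    dkernel2<<<1, 32>>>();
    cudaError_t err = cudaGetLastError();  // catches launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();         // catches errors raised during execution
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}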
e4982b89c0d1aec55520b8641e653de399a75ebd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* ******************************* BLUEBOTTLE-1.0 ******************************** ******************************************************************************* * * Copyright 2012 - 2014 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include "cuda_quadrature.h" __device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z) { *x = r * sin(theta) * cos(phi); *y = r * sin(theta) * sin(phi); *z = r * cos(theta); } __device__ void cart2sphere(real u, real v, real w, real theta, real phi, real *ur, real *ut, real *up) { real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); *ur = st * (u * cp + v * sp) + w * ct; *ut = ct * (u * cp + v * sp) - w * st; *up = -u * sp + v * cp; } __global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes, BC bc) { int node = threadIdx.x; int part = blockIdx.x; // convert node (r, theta, phi) to (x, y, z) real xp, yp, zp; // Cartesian radial vector real x, y, z; // Cartesian location of node rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp); // shift from particle center x = xp + parts[part].x; y = yp + parts[part].y; z = zp + parts[part].z; if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl; else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl; if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl; else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl; if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl; else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl; __syncthreads(); // start off with all -1's parts[part].nodes[node] = -1; // check if the node is interfered with by another particle // give it the value of the particle if it is, otherwise, set to -1 for(int i = 0; i < nparts; i++) { //if(i != part) { // a particle can never interfere with its own nodes // compute distance between node and other particle's center real dx = x - parts[i].x; real dy = y - parts[i].y; real dz = z - parts[i].z; real dist = sqrt(dx*dx + dy*dy + dz*dz); if(dist < parts[i].r && parts[part].nodes[node] == -1) parts[part].nodes[node] = i; //printf("part[%d-->%d].node[%d] = %d\n", part, i, node, parts[part].nodes[node]); //} } // check if the node is interfered with by a wall // compute distance between node and walls // set equal to some number to identify which wall is interefering if(x - dom->xs < 0) { if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -10; } if(x - dom->xe > 0) { if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET) if(parts[part].nodes[node] == -1) 
parts[part].nodes[node] = -11; } if(y - dom->ys < 0) { if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || bc.wS == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -12; } if(y - dom->ye > 0) { if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -13; } if(z - dom->zs < 0) { if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -14; } if(z - dom->ze > 0) { if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -15; } } __global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w, real rho_f, real nu, gradP_struct gradP, part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes, real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc) { int node = threadIdx.x; int part = blockIdx.x; // the node number of the intersecting node int intnode = parts[part].nodes[node]; if(intnode < 0) intnode = part; real ddx = 1. / dom->dx; real ddy = 1. / dom->dy; real ddz = 1. / dom->dz; real ox = parts[part].ox; real oy = parts[part].oy; real oz = parts[part].oz; real oxdot = parts[part].oxdot; real oydot = parts[part].oydot; real ozdot = parts[part].ozdot; real udot = parts[part].udot; real vdot = parts[part].vdot; real wdot = parts[part].wdot; real uu, vv, ww; // temporary nodes for Cartesian result of interpolation real uunode, vvnode, wwnode; real uuwall, vvwall, wwwall; // convert node (r, theta, phi) to (x, y, z) real xp, yp, zp; // Cartesian radial vector real x, y, z; // Cartesian location of node rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp); // shift from particle center x = xp + parts[part].x; y = yp + parts[part].y; z = zp + parts[part].z; if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl; else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl; if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl; else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl; if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl; else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl; __syncthreads(); // find index of cell containing node int i = floor((x - dom->xs) * ddx) + DOM_BUF; int j = floor((y - dom->ys) * ddy) + DOM_BUF; int k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gcc.is) i = dom->Gcc.is; if(j < dom->Gcc.js) j = dom->Gcc.js; if(k < dom->Gcc.ks) k = dom->Gcc.ks; if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1; if(j > dom->Gcc.je-1) j = dom->Gcc.je-1; if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1; int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b; // Cartesian location of center of cell real xx = (i-0.5) * dom->dx + dom->xs; real yy = (j-0.5) * dom->dy + dom->ys; real zz = (k-0.5) * dom->dz + dom->zs; // interpolate pressure /* real a = (0.5 * dt) / (0.5 * dt0 + 0.5 * dt); real pc = (1. + 0.5 * a) * p[C] - 0.5 * a * p0[C]; real pw = (1. + 0.5 * a) * p[C-1] - 0.5 * a * p0[C-1]; real pe = (1. + 0.5 * a) * p[C+1] - 0.5 * a * p0[C+1]; real ps = (1. + 0.5 * a) * p[C-dom->Gcc.s1b] - 0.5 * a * p0[C-dom->Gcc.s1b]; real pn = (1. + 0.5 * a) * p[C+dom->Gcc.s1b] - 0.5 * a * p0[C+dom->Gcc.s1b]; real pb = (1. + 0.5 * a) * p[C-dom->Gcc.s2b] - 0.5 * a * p0[C-dom->Gcc.s2b]; real pt = (1. 
+ 0.5 * a) * p[C+dom->Gcc.s2b] - 0.5 * a * p0[C+dom->Gcc.s2b]; real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; */ /* real pc = p[C]; real pw = p[C-1]; real pe = p[C+1]; real ps = p[C-dom->Gcc.s1b]; real pn = p[C+dom->Gcc.s1b]; real pb = p[C-dom->Gcc.s2b]; real pt = p[C+dom->Gcc.s2b]; real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; */ real a = dt0/dt; a = (a + 2.)/(a + 1.); real pc = p[C]*a + p0[C]*(1.-a); real pw = p[C-1]*a + p0[C-1]*(1.-a); real pe = p[C+1]*a + p0[C+1]*(1.-a); real ps = p[C-dom->Gcc.s1b]*a + p0[C-dom->Gcc.s1b]*(1.-a); real pn = p[C+dom->Gcc.s1b]*a + p0[C+dom->Gcc.s1b]*(1.-a); real pb = p[C-dom->Gcc.s2b]*a + p0[C-dom->Gcc.s2b]*(1.-a); real pt = p[C+dom->Gcc.s2b]*a + p0[C+dom->Gcc.s2b]*(1.-a); real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz); // switch to particle rest frame real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp); ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp); ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp); real rhoV = rho_f; real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp + (-gradP.z/rhoV - wdot)*zp; pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr; // zero if this node intersects another particle pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes]; // interpolate velocities // don't work with cell-center anymore; // find closest cell face in x-direction // interpolate u-velocity i = round((x - dom->xs) * ddx - 0.5) + DOM_BUF; j = floor((y - dom->ys) * ddy) + DOM_BUF; k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gfx.is) i = dom->Gfx.is; if(j < dom->Gfx.js) j = dom->Gfx.js; if(k < dom->Gfx.ks) k = dom->Gfx.ks; if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1; if(j > dom->Gfx.je-1) j = dom->Gfx.je-1; if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1; xx = (i-DOM_BUF) * dom->dx + dom->xs; yy = (j-0.5) * dom->dy + dom->ys; zz = (k-0.5) * dom->dz + dom->zs; C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b; real dudx = 0.5 * (u[C+1] - u[C-1]) * ddx; real dudy = 0.5 * (u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy; real dudz = 0.5 * (u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz; uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz); // set uunode equal to interfering particle u-velocity uunode = parts[intnode].u; // set uuwall equal to interfering wall u-velocity uuwall = (parts[part].nodes[node] == -10)*bc.uWD + (parts[part].nodes[node] == -11)*bc.uED + (parts[part].nodes[node] == -12)*bc.uSD + (parts[part].nodes[node] == -13)*bc.uND + (parts[part].nodes[node] == -14)*bc.uBD + (parts[part].nodes[node] == -15)*bc.uTD; // switch to particle rest frame real rs2 = parts[part].rs*parts[part].rs; real a2 = parts[part].r*parts[part].r; real ocrossr_x = oy*zp - oz*yp; real odotcrossr_x = oydot*zp - ozdot*yp; uu -= parts[part].u + ocrossr_x; uu -= 0.1/nu *(rs2-a2) * odotcrossr_x; uunode -= parts[part].u + ocrossr_x; uunode -= 0.1/nu *(rs2-a2) * odotcrossr_x; uuwall -= parts[part].u + ocrossr_x; //uuwall -= 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x; uuwall -= 0.1/nu *(rs2-a2) * odotcrossr_x; // set actual node value based on whether it is interfered with uu = (parts[part].nodes[node]==-1)*uu + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*uunode + (parts[part].nodes[node]<-1)*uuwall; //printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * 
odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x); // interpolate v-velocity i = floor((x - dom->xs) * ddx) + DOM_BUF; j = round((y - dom->ys) * ddy - 0.5) + DOM_BUF; k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gfy.is) i = dom->Gfy.is; if(j < dom->Gfy.js) j = dom->Gfy.js; if(k < dom->Gfy.ks) k = dom->Gfy.ks; if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1; if(j > dom->Gfy.je-1) j = dom->Gfy.je-1; if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1; xx = (i-0.5) * dom->dx + dom->xs; yy = (j-DOM_BUF) * dom->dy + dom->ys; zz = (k-0.5) * dom->dz + dom->zs; C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b; real dvdx = 0.5 * (v[C+1] - v[C-1]) * ddx; real dvdy = 0.5 * (v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy; real dvdz = 0.5 * (v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz; vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz); // set vvnode equal to interfering particle v-velocity vvnode = parts[intnode].v; // set vvwall equal to interfering wall v-velocity vvwall = (parts[part].nodes[node] == -10)*bc.vWD + (parts[part].nodes[node] == -11)*bc.vED + (parts[part].nodes[node] == -12)*bc.vSD + (parts[part].nodes[node] == -13)*bc.vND + (parts[part].nodes[node] == -14)*bc.vBD + (parts[part].nodes[node] == -15)*bc.vTD; // switch to particle rest frame real ocrossr_y = -(ox*zp - oz*xp); real odotcrossr_y = -(oxdot*zp - ozdot*xp); vv -= parts[part].v + ocrossr_y; vv -= 0.1/nu *(rs2-a2) * odotcrossr_y; vvnode -= parts[part].v + ocrossr_y; vvnode -= 0.1/nu *(rs2-a2) * odotcrossr_y; vvwall -= parts[part].v + ocrossr_y; vvwall -= 0.1/nu *(rs2-a2) * odotcrossr_y; // set actual node value based on whether it is interfered with vv = (parts[part].nodes[node]==-1)*vv + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*vvnode + (parts[part].nodes[node]<-1)*vvwall; // interpolate w-velocity i = floor((x - dom->xs) * ddx) + DOM_BUF; j = floor((y - dom->ys) * ddy) + DOM_BUF; k = round((z - dom->zs) * ddz - 0.5) + DOM_BUF; if(i < dom->Gfz.is) i = dom->Gfz.is; if(j < dom->Gfz.js) j = dom->Gfz.js; if(k < dom->Gfz.ks) k = dom->Gfz.ks; if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1; if(j > dom->Gfz.je-1) j = dom->Gfz.je-1; if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1; xx = (i-0.5) * dom->dx + dom->xs; yy = (j-0.5) * dom->dy + dom->ys; zz = (k-DOM_BUF) * dom->dz + dom->zs; C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b; real dwdx = 0.5 * (w[C+1] - w[C-1]) * ddx; real dwdy = 0.5 * (w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy; real dwdz = 0.5 * (w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz; ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz); // set wwnode equal to interfering particle w-velocity wwnode = parts[intnode].w; // set uuwall equal to interfering wall u-velocity wwwall = (parts[part].nodes[node] == -10)*bc.wWD + (parts[part].nodes[node] == -11)*bc.wED + (parts[part].nodes[node] == -12)*bc.wSD + (parts[part].nodes[node] == -13)*bc.wND + (parts[part].nodes[node] == -14)*bc.wBD + (parts[part].nodes[node] == -15)*bc.wTD; // switch to particle rest frame real ocrossr_z = ox*yp - oy*xp; real odotcrossr_z = oxdot*yp - oydot*xp; ww -= parts[part].w + ocrossr_z; ww -= 0.1/nu *(rs2-a2) * odotcrossr_z; wwnode -= parts[part].w + ocrossr_z; wwnode -= 0.1/nu *(rs2-a2) * odotcrossr_z; wwwall -= parts[part].w + ocrossr_z; wwwall -= 0.1/nu *(rs2-a2) * odotcrossr_z; // set actual node value based on whether it is interfered with ww = (parts[part].nodes[node]==-1)*ww + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*wwnode + (parts[part].nodes[node]<-1)*wwwall; // convert 
(uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays cart2sphere(uu, vv, ww, theta[node], phi[node], &ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]); } __device__ real nnm(int n, int m) { real fact_top = 1; real fact_bot = 1; for(int i = 1; i <= (n-m); i++) fact_top *= (real)i; for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i; return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot); } __device__ real pnm(int n, int m, real theta) { real x = cos(theta); real y = sin(theta); switch(n) { case 0: return 1; case 1: switch(m) { //case -1: return -0.5*y; case 0: return x; case 1: return -y; } case 2: switch(m) { //case -2: return 0.125*y*y; //case -1: return -0.5*x*y; case 0: return 0.5*(3.*x*x - 1.); case 1: return -3.*x*y; case 2: return 3.*y*y; } case 3: switch(m) { //case -3: return -0.02083333333333*y*y*y; //case -2: return 0.125*x*y*y; //case -1: return -0.125*(1. - 5.*x*x)*y; case 0: return 0.5*x*(5.*x*x - 3.); case 1: return -1.5*(5.*x*x - 1.)*y; case 2: return 15.*x*y*y; case 3: return -15.*y*y*y; } case 4: switch(m) { //case -4: return .002604166666667*y*y*y*y; //case -3: return -0.02083333333333*x*y*y*y*y; //case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y; //case -1: return -0.125*x*(3. - 7.*x*x)*y; case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.); case 1: return -2.5*(7.*x*x - 3.)*x*y; case 2: return 7.5*(7.*x*x - 1.)*y*y; case 3: return -105.*x*y*y*y; case 4: return 105.*y*y*y*y; } case 5: switch(m) { //case -5: return -0.000260416666667*y*y*y*y*y; //case -4: return 0.002604166666667*x*y*y*y*y; //case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.); //case -2: return 0.0625*x*y*y*(3.*x*x - 1.); //case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.); case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.); case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.); case 2: return 52.5*x*y*y*(3.*x*x - 1.); case 3: return -52.5*y*y*y*(9.*x*x - 1.); case 4: return 945.*x*y*y*y*y; case 5: return -945.*y*y*y*y*y; } } return 0; // this should never be reached } __global__ void cuda_get_coeffs(part_struct *parts, int *nn, int *mm, real *node_t, real *node_p, real *pp, real *ur, real *ut, real *up, real mu, real nu, int stride, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im, real *chinm_re, real *chinm_im, real *int_Yp_re, real *int_Yp_im, real *int_rDYu_re, real *int_rDYu_im, real *int_xXDYu_re, real *int_xXDYu_im, int nnodes, int ncoeffs, real A1, real A2, real A3, real B, real *pnm_re0, real *pnm_im0, real *phinm_re0, real *phinm_im0, real *chinm_re0, real *chinm_im0, real lambrelax) { int node = threadIdx.x; int part = blockIdx.x; int coeff = blockIdx.y; real ars = parts[part].r / parts[part].rs; real rsa = parts[part].rs / parts[part].r; int i; // iterator if(coeff < parts[part].ncoeff) { // calculate integrand at each node int j = part*nnodes*ncoeffs + coeff*nnodes + node; int n = nn[coeff]; int m = mm[coeff]; real theta = node_t[node]; real phi = node_p[node]; real N_nm = nnm(n,m); real P_nm = pnm(n,m,theta); real P_n1m = pnm(n+1,m,theta); real dPdt = (n-m+1)*P_n1m-(n+1)*cos(theta)*P_nm; real dPdp = m*P_nm; int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi); int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi); int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi) - dPdp*up[node+part*nnodes]*sin(m*phi)); int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi) - dPdp*up[node+part*nnodes]*cos(m*phi)); int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi) + 
dPdt*up[node+part*nnodes]*cos(m*phi)); int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi) - dPdt*up[node+part*nnodes]*sin(m*phi)); __syncthreads(); // compute scalar products // put sum into first node position for each coeff for each particle if(node == 0) { int_Yp_re[j] *= A1; int_Yp_im[j] *= A1; int_rDYu_re[j] *= A1; int_rDYu_im[j] *= A1; int_xXDYu_re[j] *= A1; int_xXDYu_im[j] *= A1; for(i = 1; i < 6; i++) { int_Yp_re[j] += A1 * int_Yp_re[j+i]; int_Yp_im[j] += A1 * int_Yp_im[j+i]; int_rDYu_re[j] += A1 * int_rDYu_re[j+i]; int_rDYu_im[j] += A1 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i]; } for(i = 6; i < 18; i++) { int_Yp_re[j] += A2 * int_Yp_re[j+i]; int_Yp_im[j] += A2 * int_Yp_im[j+i]; int_rDYu_re[j] += A2 * int_rDYu_re[j+i]; int_rDYu_im[j] += A2 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i]; } for(i = 18; i < 26; i++) { int_Yp_re[j] += A3 * int_Yp_re[j+i]; int_Yp_im[j] += A3 * int_Yp_im[j+i]; int_rDYu_re[j] += A3 * int_rDYu_re[j+i]; int_rDYu_im[j] += A3 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i]; } /*for(i = 26; i < 50; i++) { int_Yp_re[j] += B * int_Yp_re[j+i]; int_Yp_im[j] += B * int_Yp_im[j+i]; int_rDYu_re[j] += B * int_rDYu_re[j+i]; int_rDYu_im[j] += B * int_rDYu_im[j+i]; int_xXDYu_re[j] += B * int_xXDYu_re[j+i]; int_xXDYu_im[j] += B * int_xXDYu_im[j+i]; } */ #ifdef TEST real relax = 1.0; #else real relax = lambrelax; #endif if(n == 0) { pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff] + relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n) - pnm_re0[stride*part+coeff]); pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff] + relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n) - pnm_im0[stride*part+coeff]); phinm_re[stride*part+coeff] = 0.; phinm_im[stride*part+coeff] = 0.; chinm_re[stride*part+coeff] = 0.; chinm_im[stride*part+coeff] = 0.; } else { // calculate p_nm and phi_nm real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n); real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.); real C = 0.25*n*(2.*(n+3.)/(2.*n+3.) 
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1); real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa - n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.); pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu *int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C); pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu *int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C); phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A - parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C); phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A - parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C); // calculate chi_nm real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n); chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E; chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E; // apply underrelaxation pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax) + relax*pnm_re[stride*part+coeff]; pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax) + relax*pnm_im[stride*part+coeff]; phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax) + relax*phinm_re[stride*part+coeff]; phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax) + relax*phinm_im[stride*part+coeff]; chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax) + relax*chinm_re[stride*part+coeff]; chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax) + relax*chinm_im[stride*part+coeff]; } } } } __global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts, int nparts, gradP_struct gradP, real rho_f, real mu, real nu, int stride, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im, real *chinm_re, real *chinm_im) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number if(pp < nparts) { real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r; real N10 = sqrt(3./4./PI); real N11 = sqrt(3./8./PI); parts[pp].Fx = gradP.x + rho_f * vol * parts[pp].udot - PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2] + 6.*phinm_re[stride*pp + 2]); parts[pp].Fy = gradP.y + rho_f * vol * parts[pp].vdot + PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2] + 6.*phinm_im[stride*pp + 2]); parts[pp].Fz = gradP.z + rho_f * vol * parts[pp].wdot + PI * mu * nu * N10 * (pnm_re[stride*pp + 1] + 6.*phinm_re[stride*pp + 1]); parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot - 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2]; parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot + 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2]; parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot + 8. 
* PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1]; } } __global__ void compute_error(real lamb_cut, int stride, int nparts, real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0, real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0, real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0, real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu) { int part = blockIdx.x; int i,j; real tmp = FLT_MIN; int loc = 0; real avg = 0; real div = 0; // create shared memory space __shared__ real s_coeffs[6*21]; // ** have to hard-code this length ** __shared__ real s_coeffs0[6*21]; // ** have to hard-code this length ** // using 6 coefficient sets, each holding // a maximum of 21 coefficients (5th-order // truncation) // copy coeffs for this particle into shared memory for(i = 0; i < stride; i++) { s_coeffs[i] = pnm_re[part*stride+i]; s_coeffs[i+1*stride] = pnm_im[part*stride+i]; s_coeffs[i+2*stride] = phinm_re[part*stride+i]; s_coeffs[i+3*stride] = phinm_im[part*stride+i]; s_coeffs[i+4*stride] = chinm_re[part*stride+i]; s_coeffs[i+5*stride] = chinm_im[part*stride+i]; s_coeffs0[i] = pnm_re0[part*stride+i]; s_coeffs0[i+1*stride] = pnm_im0[part*stride+i]; s_coeffs0[i+2*stride] = phinm_re0[part*stride+i]; s_coeffs0[i+3*stride] = phinm_im0[part*stride+i]; s_coeffs0[i+4*stride] = chinm_re0[part*stride+i]; s_coeffs0[i+5*stride] = chinm_im0[part*stride+i]; } // compute the average of the coefficients for(i = 0; i < stride*6; i++) { avg += s_coeffs[i]*s_coeffs[i]; } avg = avg / (stride*6.); // sort the coefficients in shared memory and calculate errors along the way for(i = 0; i < 6*stride; i++) { // search for the largest magnitude value in shared and store its location tmp = FLT_MIN; for(j = 0; j < 6*stride; j++) { if(s_coeffs[j]*s_coeffs[j] > tmp) { tmp = s_coeffs[j]*s_coeffs[j]; loc = j; } } // move the largest value into sorted list coeffs[part*stride+i] = s_coeffs[loc]; // if its corresponding coefficient has large enough magnitude, // compute error for this coefficient if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) { div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4; if(div < 1e-16) div = 1e-16; errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div); } else errors[part*stride+i] = 0.; // discard this value since we've used it once s_coeffs[loc] = 0.; } // find the largest error for each particle tmp = FLT_MIN; for(i = 0; i < 6*stride; i++) { if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i]; } // write error to return for each particle part_errors[part] = tmp; }
e4982b89c0d1aec55520b8641e653de399a75ebd.cu
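// Overview (inferred from the code below): this translation unit provides the
// CUDA device/global routines Bluebottle uses to couple particles to the flow:
//   - rtp2xyz / cart2sphere: spherical <-> Cartesian transforms for quadrature
//     nodes on each particle's integration sphere of radius rs.
//   - check_nodes: flags nodes that fall inside another particle (the node
//     stores that particle's index) or beyond a Dirichlet wall (codes -10..-15).
//   - interpolate_nodes: first-order extension of pressure and face-centered
//     velocities from the grid to each node, shifted into the particle rest
//     frame.
//   - nnm / pnm: normalization factors and associated Legendre polynomials up
//     to degree n = 5 (nonnegative m only).
//   - cuda_get_coeffs: quadrature scalar products (the A1/A2/A3 weights appear
//     to correspond to a 26-point rule: 6 + 12 + 8 nodes) feeding the
//     under-relaxed solve for the Lamb-type coefficients p_nm, phi_nm, chi_nm.
//   - cuda_calc_forces: hydrodynamic force and torque from the low-order
//     coefficients.
//   - compute_error: per-particle convergence measure over the sorted
//     coefficient magnitudes, ignoring entries below the lamb_cut threshold.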
/******************************************************************************* ******************************* BLUEBOTTLE-1.0 ******************************** ******************************************************************************* * * Copyright 2012 - 2014 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include "cuda_quadrature.h" __device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z) { *x = r * sin(theta) * cos(phi); *y = r * sin(theta) * sin(phi); *z = r * cos(theta); } __device__ void cart2sphere(real u, real v, real w, real theta, real phi, real *ur, real *ut, real *up) { real st = sin(theta); real ct = cos(theta); real sp = sin(phi); real cp = cos(phi); *ur = st * (u * cp + v * sp) + w * ct; *ut = ct * (u * cp + v * sp) - w * st; *up = -u * sp + v * cp; } __global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes, BC bc) { int node = threadIdx.x; int part = blockIdx.x; // convert node (r, theta, phi) to (x, y, z) real xp, yp, zp; // Cartesian radial vector real x, y, z; // Cartesian location of node rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp); // shift from particle center x = xp + parts[part].x; y = yp + parts[part].y; z = zp + parts[part].z; if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl; else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl; if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl; else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl; if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl; else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl; __syncthreads(); // start off with all -1's parts[part].nodes[node] = -1; // check if the node is interfered with by another particle // give it the value of the particle if it is, otherwise, set to -1 for(int i = 0; i < nparts; i++) { //if(i != part) { // a particle can never interfere with its own nodes // compute distance between node and other particle's center real dx = x - parts[i].x; real dy = y - parts[i].y; real dz = z - parts[i].z; real dist = sqrt(dx*dx + dy*dy + dz*dz); if(dist < parts[i].r && parts[part].nodes[node] == -1) parts[part].nodes[node] = i; //printf("part[%d-->%d].node[%d] = %d\n", part, i, node, parts[part].nodes[node]); //} } // check if the node is interfered with by a wall // compute distance between node and walls // set equal to some number to identify which wall is interefering if(x - dom->xs < 0) { if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -10; } if(x - dom->xe > 0) { if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -11; } if(y - dom->ys < 0) { if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || 
bc.wS == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -12; } if(y - dom->ye > 0) { if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -13; } if(z - dom->zs < 0) { if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -14; } if(z - dom->ze > 0) { if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET) if(parts[part].nodes[node] == -1) parts[part].nodes[node] = -15; } } __global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w, real rho_f, real nu, gradP_struct gradP, part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes, real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc) { int node = threadIdx.x; int part = blockIdx.x; // the node number of the intersecting node int intnode = parts[part].nodes[node]; if(intnode < 0) intnode = part; real ddx = 1. / dom->dx; real ddy = 1. / dom->dy; real ddz = 1. / dom->dz; real ox = parts[part].ox; real oy = parts[part].oy; real oz = parts[part].oz; real oxdot = parts[part].oxdot; real oydot = parts[part].oydot; real ozdot = parts[part].ozdot; real udot = parts[part].udot; real vdot = parts[part].vdot; real wdot = parts[part].wdot; real uu, vv, ww; // temporary nodes for Cartesian result of interpolation real uunode, vvnode, wwnode; real uuwall, vvwall, wwwall; // convert node (r, theta, phi) to (x, y, z) real xp, yp, zp; // Cartesian radial vector real x, y, z; // Cartesian location of node rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp); // shift from particle center x = xp + parts[part].x; y = yp + parts[part].y; z = zp + parts[part].z; if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl; else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl; if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl; else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl; if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl; else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl; __syncthreads(); // find index of cell containing node int i = floor((x - dom->xs) * ddx) + DOM_BUF; int j = floor((y - dom->ys) * ddy) + DOM_BUF; int k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gcc.is) i = dom->Gcc.is; if(j < dom->Gcc.js) j = dom->Gcc.js; if(k < dom->Gcc.ks) k = dom->Gcc.ks; if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1; if(j > dom->Gcc.je-1) j = dom->Gcc.je-1; if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1; int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b; // Cartesian location of center of cell real xx = (i-0.5) * dom->dx + dom->xs; real yy = (j-0.5) * dom->dy + dom->ys; real zz = (k-0.5) * dom->dz + dom->zs; // interpolate pressure /* real a = (0.5 * dt) / (0.5 * dt0 + 0.5 * dt); real pc = (1. + 0.5 * a) * p[C] - 0.5 * a * p0[C]; real pw = (1. + 0.5 * a) * p[C-1] - 0.5 * a * p0[C-1]; real pe = (1. + 0.5 * a) * p[C+1] - 0.5 * a * p0[C+1]; real ps = (1. + 0.5 * a) * p[C-dom->Gcc.s1b] - 0.5 * a * p0[C-dom->Gcc.s1b]; real pn = (1. + 0.5 * a) * p[C+dom->Gcc.s1b] - 0.5 * a * p0[C+dom->Gcc.s1b]; real pb = (1. + 0.5 * a) * p[C-dom->Gcc.s2b] - 0.5 * a * p0[C-dom->Gcc.s2b]; real pt = (1. 
+ 0.5 * a) * p[C+dom->Gcc.s2b] - 0.5 * a * p0[C+dom->Gcc.s2b]; real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; */ /* real pc = p[C]; real pw = p[C-1]; real pe = p[C+1]; real ps = p[C-dom->Gcc.s1b]; real pn = p[C+dom->Gcc.s1b]; real pb = p[C-dom->Gcc.s2b]; real pt = p[C+dom->Gcc.s2b]; real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; */ real a = dt0/dt; a = (a + 2.)/(a + 1.); real pc = p[C]*a + p0[C]*(1.-a); real pw = p[C-1]*a + p0[C-1]*(1.-a); real pe = p[C+1]*a + p0[C+1]*(1.-a); real ps = p[C-dom->Gcc.s1b]*a + p0[C-dom->Gcc.s1b]*(1.-a); real pn = p[C+dom->Gcc.s1b]*a + p0[C+dom->Gcc.s1b]*(1.-a); real pb = p[C-dom->Gcc.s2b]*a + p0[C-dom->Gcc.s2b]*(1.-a); real pt = p[C+dom->Gcc.s2b]*a + p0[C+dom->Gcc.s2b]*(1.-a); real dpdx = 0.5 * (pe - pw) * ddx; real dpdy = 0.5 * (pn - ps) * ddy; real dpdz = 0.5 * (pt - pb) * ddz; pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz); // switch to particle rest frame real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp); ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp); ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp); real rhoV = rho_f; real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp + (-gradP.z/rhoV - wdot)*zp; pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr; // zero if this node intersects another particle pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes]; // interpolate velocities // don't work with cell-center anymore; // find closest cell face in x-direction // interpolate u-velocity i = round((x - dom->xs) * ddx - 0.5) + DOM_BUF; j = floor((y - dom->ys) * ddy) + DOM_BUF; k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gfx.is) i = dom->Gfx.is; if(j < dom->Gfx.js) j = dom->Gfx.js; if(k < dom->Gfx.ks) k = dom->Gfx.ks; if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1; if(j > dom->Gfx.je-1) j = dom->Gfx.je-1; if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1; xx = (i-DOM_BUF) * dom->dx + dom->xs; yy = (j-0.5) * dom->dy + dom->ys; zz = (k-0.5) * dom->dz + dom->zs; C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b; real dudx = 0.5 * (u[C+1] - u[C-1]) * ddx; real dudy = 0.5 * (u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy; real dudz = 0.5 * (u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz; uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz); // set uunode equal to interfering particle u-velocity uunode = parts[intnode].u; // set uuwall equal to interfering wall u-velocity uuwall = (parts[part].nodes[node] == -10)*bc.uWD + (parts[part].nodes[node] == -11)*bc.uED + (parts[part].nodes[node] == -12)*bc.uSD + (parts[part].nodes[node] == -13)*bc.uND + (parts[part].nodes[node] == -14)*bc.uBD + (parts[part].nodes[node] == -15)*bc.uTD; // switch to particle rest frame real rs2 = parts[part].rs*parts[part].rs; real a2 = parts[part].r*parts[part].r; real ocrossr_x = oy*zp - oz*yp; real odotcrossr_x = oydot*zp - ozdot*yp; uu -= parts[part].u + ocrossr_x; uu -= 0.1/nu *(rs2-a2) * odotcrossr_x; uunode -= parts[part].u + ocrossr_x; uunode -= 0.1/nu *(rs2-a2) * odotcrossr_x; uuwall -= parts[part].u + ocrossr_x; //uuwall -= 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x; uuwall -= 0.1/nu *(rs2-a2) * odotcrossr_x; // set actual node value based on whether it is interfered with uu = (parts[part].nodes[node]==-1)*uu + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*uunode + (parts[part].nodes[node]<-1)*uuwall; //printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * 
odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x); // interpolate v-velocity i = floor((x - dom->xs) * ddx) + DOM_BUF; j = round((y - dom->ys) * ddy - 0.5) + DOM_BUF; k = floor((z - dom->zs) * ddz) + DOM_BUF; if(i < dom->Gfy.is) i = dom->Gfy.is; if(j < dom->Gfy.js) j = dom->Gfy.js; if(k < dom->Gfy.ks) k = dom->Gfy.ks; if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1; if(j > dom->Gfy.je-1) j = dom->Gfy.je-1; if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1; xx = (i-0.5) * dom->dx + dom->xs; yy = (j-DOM_BUF) * dom->dy + dom->ys; zz = (k-0.5) * dom->dz + dom->zs; C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b; real dvdx = 0.5 * (v[C+1] - v[C-1]) * ddx; real dvdy = 0.5 * (v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy; real dvdz = 0.5 * (v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz; vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz); // set vvnode equal to interfering particle v-velocity vvnode = parts[intnode].v; // set vvwall equal to interfering wall v-velocity vvwall = (parts[part].nodes[node] == -10)*bc.vWD + (parts[part].nodes[node] == -11)*bc.vED + (parts[part].nodes[node] == -12)*bc.vSD + (parts[part].nodes[node] == -13)*bc.vND + (parts[part].nodes[node] == -14)*bc.vBD + (parts[part].nodes[node] == -15)*bc.vTD; // switch to particle rest frame real ocrossr_y = -(ox*zp - oz*xp); real odotcrossr_y = -(oxdot*zp - ozdot*xp); vv -= parts[part].v + ocrossr_y; vv -= 0.1/nu *(rs2-a2) * odotcrossr_y; vvnode -= parts[part].v + ocrossr_y; vvnode -= 0.1/nu *(rs2-a2) * odotcrossr_y; vvwall -= parts[part].v + ocrossr_y; vvwall -= 0.1/nu *(rs2-a2) * odotcrossr_y; // set actual node value based on whether it is interfered with vv = (parts[part].nodes[node]==-1)*vv + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*vvnode + (parts[part].nodes[node]<-1)*vvwall; // interpolate w-velocity i = floor((x - dom->xs) * ddx) + DOM_BUF; j = floor((y - dom->ys) * ddy) + DOM_BUF; k = round((z - dom->zs) * ddz - 0.5) + DOM_BUF; if(i < dom->Gfz.is) i = dom->Gfz.is; if(j < dom->Gfz.js) j = dom->Gfz.js; if(k < dom->Gfz.ks) k = dom->Gfz.ks; if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1; if(j > dom->Gfz.je-1) j = dom->Gfz.je-1; if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1; xx = (i-0.5) * dom->dx + dom->xs; yy = (j-0.5) * dom->dy + dom->ys; zz = (k-DOM_BUF) * dom->dz + dom->zs; C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b; real dwdx = 0.5 * (w[C+1] - w[C-1]) * ddx; real dwdy = 0.5 * (w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy; real dwdz = 0.5 * (w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz; ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz); // set wwnode equal to interfering particle w-velocity wwnode = parts[intnode].w; // set uuwall equal to interfering wall u-velocity wwwall = (parts[part].nodes[node] == -10)*bc.wWD + (parts[part].nodes[node] == -11)*bc.wED + (parts[part].nodes[node] == -12)*bc.wSD + (parts[part].nodes[node] == -13)*bc.wND + (parts[part].nodes[node] == -14)*bc.wBD + (parts[part].nodes[node] == -15)*bc.wTD; // switch to particle rest frame real ocrossr_z = ox*yp - oy*xp; real odotcrossr_z = oxdot*yp - oydot*xp; ww -= parts[part].w + ocrossr_z; ww -= 0.1/nu *(rs2-a2) * odotcrossr_z; wwnode -= parts[part].w + ocrossr_z; wwnode -= 0.1/nu *(rs2-a2) * odotcrossr_z; wwwall -= parts[part].w + ocrossr_z; wwwall -= 0.1/nu *(rs2-a2) * odotcrossr_z; // set actual node value based on whether it is interfered with ww = (parts[part].nodes[node]==-1)*ww + (parts[part].nodes[node]!=part)*(parts[part].nodes[node]>-1)*wwnode + (parts[part].nodes[node]<-1)*wwwall; // convert 
(uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays cart2sphere(uu, vv, ww, theta[node], phi[node], &ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]); } __device__ real nnm(int n, int m) { real fact_top = 1; real fact_bot = 1; for(int i = 1; i <= (n-m); i++) fact_top *= (real)i; for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i; return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot); } __device__ real pnm(int n, int m, real theta) { real x = cos(theta); real y = sin(theta); switch(n) { case 0: return 1; case 1: switch(m) { //case -1: return -0.5*y; case 0: return x; case 1: return -y; } case 2: switch(m) { //case -2: return 0.125*y*y; //case -1: return -0.5*x*y; case 0: return 0.5*(3.*x*x - 1.); case 1: return -3.*x*y; case 2: return 3.*y*y; } case 3: switch(m) { //case -3: return -0.02083333333333*y*y*y; //case -2: return 0.125*x*y*y; //case -1: return -0.125*(1. - 5.*x*x)*y; case 0: return 0.5*x*(5.*x*x - 3.); case 1: return -1.5*(5.*x*x - 1.)*y; case 2: return 15.*x*y*y; case 3: return -15.*y*y*y; } case 4: switch(m) { //case -4: return .002604166666667*y*y*y*y; //case -3: return -0.02083333333333*x*y*y*y*y; //case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y; //case -1: return -0.125*x*(3. - 7.*x*x)*y; case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.); case 1: return -2.5*(7.*x*x - 3.)*x*y; case 2: return 7.5*(7.*x*x - 1.)*y*y; case 3: return -105.*x*y*y*y; case 4: return 105.*y*y*y*y; } case 5: switch(m) { //case -5: return -0.000260416666667*y*y*y*y*y; //case -4: return 0.002604166666667*x*y*y*y*y; //case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.); //case -2: return 0.0625*x*y*y*(3.*x*x - 1.); //case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.); case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.); case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.); case 2: return 52.5*x*y*y*(3.*x*x - 1.); case 3: return -52.5*y*y*y*(9.*x*x - 1.); case 4: return 945.*x*y*y*y*y; case 5: return -945.*y*y*y*y*y; } } return 0; // this should never be reached } __global__ void cuda_get_coeffs(part_struct *parts, int *nn, int *mm, real *node_t, real *node_p, real *pp, real *ur, real *ut, real *up, real mu, real nu, int stride, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im, real *chinm_re, real *chinm_im, real *int_Yp_re, real *int_Yp_im, real *int_rDYu_re, real *int_rDYu_im, real *int_xXDYu_re, real *int_xXDYu_im, int nnodes, int ncoeffs, real A1, real A2, real A3, real B, real *pnm_re0, real *pnm_im0, real *phinm_re0, real *phinm_im0, real *chinm_re0, real *chinm_im0, real lambrelax) { int node = threadIdx.x; int part = blockIdx.x; int coeff = blockIdx.y; real ars = parts[part].r / parts[part].rs; real rsa = parts[part].rs / parts[part].r; int i; // iterator if(coeff < parts[part].ncoeff) { // calculate integrand at each node int j = part*nnodes*ncoeffs + coeff*nnodes + node; int n = nn[coeff]; int m = mm[coeff]; real theta = node_t[node]; real phi = node_p[node]; real N_nm = nnm(n,m); real P_nm = pnm(n,m,theta); real P_n1m = pnm(n+1,m,theta); real dPdt = (n-m+1)*P_n1m-(n+1)*cos(theta)*P_nm; real dPdp = m*P_nm; int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi); int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi); int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi) - dPdp*up[node+part*nnodes]*sin(m*phi)); int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi) - dPdp*up[node+part*nnodes]*cos(m*phi)); int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi) + 
dPdt*up[node+part*nnodes]*cos(m*phi)); int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi) - dPdt*up[node+part*nnodes]*sin(m*phi)); __syncthreads(); // compute scalar products // put sum into first node position for each coeff for each particle if(node == 0) { int_Yp_re[j] *= A1; int_Yp_im[j] *= A1; int_rDYu_re[j] *= A1; int_rDYu_im[j] *= A1; int_xXDYu_re[j] *= A1; int_xXDYu_im[j] *= A1; for(i = 1; i < 6; i++) { int_Yp_re[j] += A1 * int_Yp_re[j+i]; int_Yp_im[j] += A1 * int_Yp_im[j+i]; int_rDYu_re[j] += A1 * int_rDYu_re[j+i]; int_rDYu_im[j] += A1 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i]; } for(i = 6; i < 18; i++) { int_Yp_re[j] += A2 * int_Yp_re[j+i]; int_Yp_im[j] += A2 * int_Yp_im[j+i]; int_rDYu_re[j] += A2 * int_rDYu_re[j+i]; int_rDYu_im[j] += A2 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i]; } for(i = 18; i < 26; i++) { int_Yp_re[j] += A3 * int_Yp_re[j+i]; int_Yp_im[j] += A3 * int_Yp_im[j+i]; int_rDYu_re[j] += A3 * int_rDYu_re[j+i]; int_rDYu_im[j] += A3 * int_rDYu_im[j+i]; int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i]; int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i]; } /*for(i = 26; i < 50; i++) { int_Yp_re[j] += B * int_Yp_re[j+i]; int_Yp_im[j] += B * int_Yp_im[j+i]; int_rDYu_re[j] += B * int_rDYu_re[j+i]; int_rDYu_im[j] += B * int_rDYu_im[j+i]; int_xXDYu_re[j] += B * int_xXDYu_re[j+i]; int_xXDYu_im[j] += B * int_xXDYu_im[j+i]; } */ #ifdef TEST real relax = 1.0; #else real relax = lambrelax; #endif if(n == 0) { pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff] + relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n) - pnm_re0[stride*part+coeff]); pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff] + relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n) - pnm_im0[stride*part+coeff]); phinm_re[stride*part+coeff] = 0.; phinm_im[stride*part+coeff] = 0.; chinm_re[stride*part+coeff] = 0.; chinm_im[stride*part+coeff] = 0.; } else { // calculate p_nm and phi_nm real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n); real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.); real C = 0.25*n*(2.*(n+3.)/(2.*n+3.) 
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1); real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa - n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.); pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu *int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C); pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu *int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C); phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A - parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C); phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A - parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C); // calculate chi_nm real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n); chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E; chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E; // apply underrelaxation pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax) + relax*pnm_re[stride*part+coeff]; pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax) + relax*pnm_im[stride*part+coeff]; phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax) + relax*phinm_re[stride*part+coeff]; phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax) + relax*phinm_im[stride*part+coeff]; chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax) + relax*chinm_re[stride*part+coeff]; chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax) + relax*chinm_im[stride*part+coeff]; } } } } __global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts, int nparts, gradP_struct gradP, real rho_f, real mu, real nu, int stride, real *pnm_re, real *pnm_im, real *phinm_re, real *phinm_im, real *chinm_re, real *chinm_im) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number if(pp < nparts) { real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r; real N10 = sqrt(3./4./PI); real N11 = sqrt(3./8./PI); parts[pp].Fx = gradP.x + rho_f * vol * parts[pp].udot - PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2] + 6.*phinm_re[stride*pp + 2]); parts[pp].Fy = gradP.y + rho_f * vol * parts[pp].vdot + PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2] + 6.*phinm_im[stride*pp + 2]); parts[pp].Fz = gradP.z + rho_f * vol * parts[pp].wdot + PI * mu * nu * N10 * (pnm_re[stride*pp + 1] + 6.*phinm_re[stride*pp + 1]); parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot - 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2]; parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot + 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2]; parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot + 8. 
* PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1]; } } __global__ void compute_error(real lamb_cut, int stride, int nparts, real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0, real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0, real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0, real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu) { int part = blockIdx.x; int i,j; real tmp = FLT_MIN; int loc = 0; real avg = 0; real div = 0; // create shared memory space __shared__ real s_coeffs[6*21]; // ** have to hard-code this length ** __shared__ real s_coeffs0[6*21]; // ** have to hard-code this length ** // using 6 coefficient sets, each holding // a maximum of 21 coefficients (5th-order // truncation) // copy coeffs for this particle into shared memory for(i = 0; i < stride; i++) { s_coeffs[i] = pnm_re[part*stride+i]; s_coeffs[i+1*stride] = pnm_im[part*stride+i]; s_coeffs[i+2*stride] = phinm_re[part*stride+i]; s_coeffs[i+3*stride] = phinm_im[part*stride+i]; s_coeffs[i+4*stride] = chinm_re[part*stride+i]; s_coeffs[i+5*stride] = chinm_im[part*stride+i]; s_coeffs0[i] = pnm_re0[part*stride+i]; s_coeffs0[i+1*stride] = pnm_im0[part*stride+i]; s_coeffs0[i+2*stride] = phinm_re0[part*stride+i]; s_coeffs0[i+3*stride] = phinm_im0[part*stride+i]; s_coeffs0[i+4*stride] = chinm_re0[part*stride+i]; s_coeffs0[i+5*stride] = chinm_im0[part*stride+i]; } // compute the average of the coefficients for(i = 0; i < stride*6; i++) { avg += s_coeffs[i]*s_coeffs[i]; } avg = avg / (stride*6.); // sort the coefficients in shared memory and calculate errors along the way for(i = 0; i < 6*stride; i++) { // search for the largest magnitude value in shared and store its location tmp = FLT_MIN; for(j = 0; j < 6*stride; j++) { if(s_coeffs[j]*s_coeffs[j] > tmp) { tmp = s_coeffs[j]*s_coeffs[j]; loc = j; } } // move the largest value into sorted list coeffs[part*stride+i] = s_coeffs[loc]; // if its corresponding coefficient has large enough magnitude, // compute error for this coefficient if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) { div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4; if(div < 1e-16) div = 1e-16; errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div); } else errors[part*stride+i] = 0.; // discard this value since we've used it once s_coeffs[loc] = 0.; } // find the largest error for each particle tmp = FLT_MIN; for(i = 0; i < 6*stride; i++) { if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i]; } // write error to return for each particle part_errors[part] = tmp; }
935a2ff6a2d5ce06fa601a490b78272a6c576d75.hip
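// Overview (inferred from the code below): Caffe2's RoIAlign gradient operator.
// RoIAlignBackwardFeature walks every element of the pooled output, re-derives
// the sampling grid used in the forward pass (sampling_ratio points per bin, or
// ceil(roi_size / pooled_size) when sampling_ratio <= 0), computes the four
// bilinear weights per sample with bilinear_interpolate_gradient, and scatters
// top_diff * w / count into bottom_diff with atomicAdd. ROIs are laid out as
// five columns per row, [batch_index, x1, y1, x2, y2], scaled by spatial_scale,
// and malformed boxes are clamped to at least 1x1. Note that gpu_atomic_add is
// only specialized for float here, and the unordered atomic accumulation can
// make the gradient non-bitwise-reproducible across runs.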
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "roi_align_gradient_op.h" #include <stdio.h> #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace template <> bool RoIAlignGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op // (aka "gradInput") dX->ResizeLike(X); // Must zero-out dX before accumulating gradients // (TODO): Kaiming - is this safe? math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois hipLaunchKernelGGL(( RoIAlignBackwardFeature<float>) , dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), dY.data<float>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, sampling_ratio_, dX->mutable_data<float>(), R.data<float>()); } return true; } REGISTER_CUDA_OPERATOR( RoIAlignGradient, RoIAlignGradientOp<float, CUDAContext>); } // namespace caffe2
935a2ff6a2d5ce06fa601a490b78272a6c576d75.cu
#include "roi_align_gradient_op.h" #include <stdio.h> #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace template <> bool RoIAlignGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op // (aka "gradInput") dX->ResizeLike(X); // Must zero-out dX before accumulating gradients // (TODO): Kaiming - is this safe? math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois RoIAlignBackwardFeature<float> <<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), dY.data<float>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, sampling_ratio_, dX->mutable_data<float>(), R.data<float>()); } return true; } REGISTER_CUDA_OPERATOR( RoIAlignGradient, RoIAlignGradientOp<float, CUDAContext>); } // namespace caffe2
88ceafba728016b7802cc0ece16fb23cf370f3da.hip
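// Overview (inferred from the code below): a self-contained benchmark that adds
// two square float matrices, first on the CPU and then on the GPU with a
// user-chosen thread-block edge (4-32) and grid edge, checks the GPU result
// against the CPU result, and reports the elapsed times, the speedup, and the
// L2 norm of the difference. Caveats visible in the source: the host buffers
// are sized with sizeof(int) even though the elements are float (the sizes
// coincide on common platforms, but the intent is sizeof(float)), and
// checkIfMatricesEqual prints float values with the %d format specifier.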
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> __global__ void matAdd(float *matrixA, float *matrixB, float *matrixC, int matSize) { int threadCol = blockIdx.x * blockDim.x + threadIdx.x; int threadRow = blockIdx.y * blockDim.y + threadIdx.y; int indexOfMatrix = threadCol + threadRow * matSize; if(threadCol < matSize && threadRow < matSize) matrixC[indexOfMatrix] = matrixA[indexOfMatrix] + matrixB[indexOfMatrix]; } void printMatrix(float *matrix, int size, char * matrixName) { if(size > 10) return; int i = 0; printf("Printing Matrix: %s\n", matrixName); for( ; i < size * size ; i ++) { if(i % size == 0) printf("\n"); printf("%-3f ", matrix[i]); } printf("\n\n"); } void checkError(hipError_t error, char * function) { if(error != hipSuccess) { printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, hipGetErrorString(error)); exit(-1); } } bool checkIfMatricesEqual(float * mat1, float * mat2, int matSize) { int i = 0; for( ; i < matSize; i++) if(mat1[i] != mat2[i]){ printf("values different for i: %d\n", i); printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]); return false; } return true; } void readValue(int *value, char * msg, int lowerBound, int upperBound) { while(true) { printf("%s(%d-%d): ", msg, lowerBound, upperBound); scanf("%d", value); if(*value <= upperBound && *value >= lowerBound) return; } } void RandomInit(float *data, int n) { for(int i = 0; i < n*n; i++) data[i] = rand()/(float)RAND_MAX; } int main() { //Have some variables required for loop counters. int i; //have variables for threads per block, number of blocks. int threadsPerBlock = 0, blocksInGrid = 0; //create cuda event variables hipEvent_t hostStart, hostStop, deviceStart, deviceStop; float timeDifferenceOnHost, timeDifferenceOnDevice; //program variables int matrixSize = 0; size_t size; //variable to have the size of arrays on device //int *matA, *matB, *matC, *matCFromGPU; //matrices for host float *matA, *matB, *matC, *matCFromGPU; float *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device //initialize cuda timing variables hipEventCreate(&hostStart); hipEventCreate(&hostStop); hipEventCreate(&deviceStart); hipEventCreate(&deviceStop); printf("Enter the size of the matrix: "); scanf("%d", &matrixSize); //calculate the size required on GPU size = matrixSize * matrixSize * sizeof(int); matA = (float *)malloc(matrixSize * sizeof(int) * matrixSize); matB = (float *)malloc(matrixSize * sizeof(int) * matrixSize); matC = (float *)malloc(matrixSize * sizeof(int) * matrixSize); /** for(i = 0 ; i < matrixSize * matrixSize; i ++) matA[i] = matB[i] = (i*2)%1000; **/ RandomInit(matA,matrixSize); RandomInit(matB,matrixSize); //printMatrix(matA, matrixSize, "Matrix A"); //printMatrix(matB, matrixSize, "Matrix B"); printf("Adding matrices on CPU...\n"); hipEventRecord(hostStart, 0); for(i = 0 ; i < matrixSize * matrixSize; i ++) matC[i] = matA[i] + matB[i]; hipEventRecord(hostStop, 0); hipEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop); printf("Matrix addition over. 
Time taken on CPU: %5.5f\n", timeDifferenceOnHost); printMatrix(matC, matrixSize, "Summation Matrix"); //allocate memory on GPU checkError(hipMalloc((void**)&gpuMatA, size), "Malloc for Matrix A"); checkError(hipMalloc((void**)&gpuMatB, size), "Malloc for Matrix B"); checkError(hipMalloc((void**)&gpuMatC, size), "Malloc for Matrix C"); //copy the matrix A and matrix B checkError(hipMemcpy(gpuMatA, matA, size, hipMemcpyHostToDevice), "Matrix A Copy"); checkError(hipMemcpy(gpuMatB, matB, size, hipMemcpyHostToDevice), "Matrix B Copy"); bool done = false; while(!done) { matCFromGPU = (float *)malloc(matrixSize * sizeof(int) * matrixSize); //create a proper grid block using dim3 readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32); readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535); printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid); printf("Adding matrices on GPU..\n"); dim3 blocks(threadsPerBlock, threadsPerBlock); dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y)); //call the kernels to execute hipEventRecord(deviceStart, 0); printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock); hipLaunchKernelGGL(( matAdd), dim3(grid), dim3(blocks), 0, 0, gpuMatA, gpuMatB, gpuMatC, matrixSize); hipEventRecord(deviceStop, 0); hipEventSynchronize(deviceStop); hipEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop); //copy the result back into host memory checkError(hipMemcpy(matCFromGPU, gpuMatC, size, hipMemcpyDeviceToHost), "Matrix C Copy from device to Host"); if(checkIfMatricesEqual(matC, matCFromGPU, matrixSize)) printf("Kernels correct!\n"); else printf("Kernel logic wrong!\n"); printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice); printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice); printMatrix(matCFromGPU, matrixSize, "Summation Matrix from GPU"); double sum = 0.; double diff; for(int i = 0; i < matrixSize * matrixSize ; i++){ diff = abs(matCFromGPU[i]-matC[i]); sum += diff*diff; } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n\n",sum); char c = 'n'; printf("Again?(y/n): "); while(true) { c = getchar(); if(c == 'y' || c == 'n') break; } if(c == 'n') break; free(matCFromGPU); } free(matA); free(matB); free(matC); hipEventDestroy(deviceStart); hipEventDestroy(deviceStop); hipEventDestroy(hostStart); hipEventDestroy(hostStop); return 0; }
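A note on the CPU-side timing in the file above: hipEventElapsedTime reports an error if the stop event has not yet completed, and while the GPU path synchronizes on deviceStop before reading the elapsed time, the CPU path reads hostStop without a synchronize. Because those events bracket host-only work on the default stream they usually complete immediately, but the robust pattern mirrors the GPU path. A sketch, reusing the event variables defined above:

// Sketch only: same hostStart/hostStop events as in the program above.
hipEventRecord(hostStop, 0);
hipEventSynchronize(hostStop);        // make sure the event has actually completed
float elapsedMs = 0.0f;
hipEventElapsedTime(&elapsedMs, hostStart, hostStop);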
88ceafba728016b7802cc0ece16fb23cf370f3da.cu
#include<stdio.h> #include<stdlib.h> __global__ void matAdd(float *matrixA, float *matrixB, float *matrixC, int matSize) { int threadCol = blockIdx.x * blockDim.x + threadIdx.x; int threadRow = blockIdx.y * blockDim.y + threadIdx.y; int indexOfMatrix = threadCol + threadRow * matSize; if(threadCol < matSize && threadRow < matSize) matrixC[indexOfMatrix] = matrixA[indexOfMatrix] + matrixB[indexOfMatrix]; } void printMatrix(float *matrix, int size, char * matrixName) { if(size > 10) return; int i = 0; printf("Printing Matrix: %s\n", matrixName); for( ; i < size * size ; i ++) { if(i % size == 0) printf("\n"); printf("%-3f ", matrix[i]); } printf("\n\n"); } void checkError(cudaError_t error, char * function) { if(error != cudaSuccess) { printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error)); exit(-1); } } bool checkIfMatricesEqual(float * mat1, float * mat2, int matSize) { int i = 0; for( ; i < matSize; i++) if(mat1[i] != mat2[i]){ printf("values different for i: %d\n", i); printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]); return false; } return true; } void readValue(int *value, char * msg, int lowerBound, int upperBound) { while(true) { printf("%s(%d-%d): ", msg, lowerBound, upperBound); scanf("%d", value); if(*value <= upperBound && *value >= lowerBound) return; } } void RandomInit(float *data, int n) { for(int i = 0; i < n*n; i++) data[i] = rand()/(float)RAND_MAX; } int main() { //Have some variables required for loop counters. int i; //have variables for threads per block, number of blocks. int threadsPerBlock = 0, blocksInGrid = 0; //create cuda event variables cudaEvent_t hostStart, hostStop, deviceStart, deviceStop; float timeDifferenceOnHost, timeDifferenceOnDevice; //program variables int matrixSize = 0; size_t size; //variable to have the size of arrays on device //int *matA, *matB, *matC, *matCFromGPU; //matrices for host float *matA, *matB, *matC, *matCFromGPU; float *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device //initialize cuda timing variables cudaEventCreate(&hostStart); cudaEventCreate(&hostStop); cudaEventCreate(&deviceStart); cudaEventCreate(&deviceStop); printf("Enter the size of the matrix: "); scanf("%d", &matrixSize); //calculate the size required on GPU size = matrixSize * matrixSize * sizeof(int); matA = (float *)malloc(matrixSize * sizeof(int) * matrixSize); matB = (float *)malloc(matrixSize * sizeof(int) * matrixSize); matC = (float *)malloc(matrixSize * sizeof(int) * matrixSize); /** for(i = 0 ; i < matrixSize * matrixSize; i ++) matA[i] = matB[i] = (i*2)%1000; **/ RandomInit(matA,matrixSize); RandomInit(matB,matrixSize); //printMatrix(matA, matrixSize, "Matrix A"); //printMatrix(matB, matrixSize, "Matrix B"); printf("Adding matrices on CPU...\n"); cudaEventRecord(hostStart, 0); for(i = 0 ; i < matrixSize * matrixSize; i ++) matC[i] = matA[i] + matB[i]; cudaEventRecord(hostStop, 0); cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop); printf("Matrix addition over. 
Time taken on CPU: %5.5f\n", timeDifferenceOnHost); printMatrix(matC, matrixSize, "Summation Matrix"); //allocate memory on GPU checkError(cudaMalloc((void**)&gpuMatA, size), "Malloc for Matrix A"); checkError(cudaMalloc((void**)&gpuMatB, size), "Malloc for Matrix B"); checkError(cudaMalloc((void**)&gpuMatC, size), "Malloc for Matrix C"); //copy the matrix A and matrix B checkError(cudaMemcpy(gpuMatA, matA, size, cudaMemcpyHostToDevice), "Matrix A Copy"); checkError(cudaMemcpy(gpuMatB, matB, size, cudaMemcpyHostToDevice), "Matrix B Copy"); bool done = false; while(!done) { matCFromGPU = (float *)malloc(matrixSize * sizeof(int) * matrixSize); //create a proper grid block using dim3 readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32); readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535); printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid); printf("Adding matrices on GPU..\n"); dim3 blocks(threadsPerBlock, threadsPerBlock); dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y)); //call the kernels to execute cudaEventRecord(deviceStart, 0); printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock); matAdd<<<grid, blocks>>>(gpuMatA, gpuMatB, gpuMatC, matrixSize); cudaEventRecord(deviceStop, 0); cudaEventSynchronize(deviceStop); cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop); //copy the result back into host memory checkError(cudaMemcpy(matCFromGPU, gpuMatC, size, cudaMemcpyDeviceToHost), "Matrix C Copy from device to Host"); if(checkIfMatricesEqual(matC, matCFromGPU, matrixSize)) printf("Kernels correct!\n"); else printf("Kernel logic wrong!\n"); printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice); printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice); printMatrix(matCFromGPU, matrixSize, "Summation Matrix from GPU"); double sum = 0.; double diff; for(int i = 0; i < matrixSize * matrixSize ; i++){ diff = abs(matCFromGPU[i]-matC[i]); sum += diff*diff; } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n\n",sum); char c = 'n'; printf("Again?(y/n): "); while(true) { c = getchar(); if(c == 'y' || c == 'n') break; } if(c == 'n') break; free(matCFromGPU); } free(matA); free(matB); free(matC); cudaEventDestroy(deviceStart); cudaEventDestroy(deviceStop); cudaEventDestroy(hostStart); cudaEventDestroy(hostStop); return 0; }
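In both the .hip and .cu copy of this program, checkIfMatricesEqual only iterates matSize elements (one row) rather than matSize * matSize, and it prints float values with %d. A sketch of a corrected drop-in helper, using a small tolerance (exact equality would also hold here since both sides compute the same additions):

// Sketch of a corrected comparison helper (assumption: tolerance value is ours).
#include <stdio.h>
#include <math.h>

bool checkIfMatricesEqual(float *mat1, float *mat2, int matSize)
{
    for (int i = 0; i < matSize * matSize; i++) {     // compare every element, not just the first row
        if (fabsf(mat1[i] - mat2[i]) > 1e-5f) {
            printf("values different for i: %d\n", i);
            printf("mat1[i] = %f, mat2[i] = %f\n", mat1[i], mat2[i]);  // floats printed with %f
            return false;
        }
    }
    return true;
}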
3c56edc585f56e4653863d29607f8a194b95e565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GridTools * * Copyright (c) 2014-2023, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "gtest/gtest.h" #include <cstdlib> #include <gridtools/common/atomic_functions.hpp> #include <gridtools/common/cuda_util.hpp> #include <gridtools/common/defs.hpp> template <typename T> struct verifier { static void TestEQ(T val, T exp) { T err = ::fabs(val - exp) / ::fabs(val); ASSERT_TRUE(err < 1e-12); } }; template <> struct verifier<float> { static void TestEQ(float val, float exp) { double err = ::fabs(val - exp) / ::fabs(val); ASSERT_TRUE(err < 1e-6); } }; template <> struct verifier<int> { static void TestEQ(int val, int exp) { ASSERT_EQ(val, exp); } }; template <typename T> __global__ void atomic_add_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_add(*pReduced, field[pos]); } template <typename T> __global__ void atomic_sub_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_sub(*pReduced, field[pos]); } template <typename T> __global__ void atomic_min_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_min(*pReduced, field[pos]); } template <typename T> __global__ void atomic_max_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_max(*pReduced, field[pos]); } template <typename T> void test_atomic_add() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T sumRef = 0; T sum = 0; T *sumDevice; GT_CUDA_CHECK(hipMalloc(&sumDevice, sizeof(T))); GT_CUDA_CHECK(hipMemcpy(sumDevice, &sum, sizeof(T), hipMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(hipMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); sumRef += field[cnt]; } GT_CUDA_CHECK(hipMemcpy(fieldDevice, &field[0], sizeof(T) * size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( atomic_add_kernel), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, sumDevice, fieldDevice, size); GT_CUDA_CHECK(hipMemcpy(&sum, sumDevice, sizeof(T), hipMemcpyDeviceToHost)); verifier<T>::TestEQ(sumRef, sum); } template <typename T> void test_atomic_sub() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T sumRef = 0; T sum = 0; T *sumDevice; GT_CUDA_CHECK(hipMalloc(&sumDevice, sizeof(T))); GT_CUDA_CHECK(hipMemcpy(sumDevice, &sum, sizeof(T), hipMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(hipMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); sumRef -= 
field[cnt]; } GT_CUDA_CHECK(hipMemcpy(fieldDevice, &field[0], sizeof(T) * size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( atomic_sub_kernel), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, sumDevice, fieldDevice, size); GT_CUDA_CHECK(hipMemcpy(&sum, sumDevice, sizeof(T), hipMemcpyDeviceToHost)); verifier<T>::TestEQ(sumRef, sum); } template <typename T> void test_atomic_min() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T minRef = 99999; T min = 99999; T *minDevice; GT_CUDA_CHECK(hipMalloc(&minDevice, sizeof(T))); GT_CUDA_CHECK(hipMemcpy(minDevice, &min, sizeof(T), hipMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(hipMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); minRef = ::min(minRef, field[cnt]); } GT_CUDA_CHECK(hipMemcpy(fieldDevice, &field[0], sizeof(T) * size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( atomic_min_kernel), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, minDevice, fieldDevice, size); GT_CUDA_CHECK(hipMemcpy(&min, minDevice, sizeof(T), hipMemcpyDeviceToHost)); verifier<T>::TestEQ(minRef, min); } template <typename T> void test_atomic_max() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T maxRef = -1; T max = -1; T *maxDevice; GT_CUDA_CHECK(hipMalloc(&maxDevice, sizeof(T))); GT_CUDA_CHECK(hipMemcpy(maxDevice, &max, sizeof(T), hipMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(hipMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); maxRef = ::max(maxRef, field[cnt]); } GT_CUDA_CHECK(hipMemcpy(fieldDevice, &field[0], sizeof(T) * size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( atomic_max_kernel), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, maxDevice, fieldDevice, size); GT_CUDA_CHECK(hipMemcpy(&max, maxDevice, sizeof(T), hipMemcpyDeviceToHost)); verifier<T>::TestEQ(maxRef, max); } TEST(AtomicFunctionsUnittest, atomic_add_int) { test_atomic_add<int>(); } TEST(AtomicFunctionsUnittest, atomic_add_real) { test_atomic_add<double>(); test_atomic_add<float>(); } TEST(AtomicFunctionsUnittest, atomic_sub_int) { test_atomic_sub<int>(); } TEST(AtomicFunctionsUnittest, atomic_sub_real) { test_atomic_sub<double>(); test_atomic_sub<float>(); } TEST(AtomicFunctionsUnittest, atomic_min_int) { test_atomic_min<int>(); } TEST(AtomicFunctionsUnittest, atomic_min_real) { test_atomic_min<double>(); test_atomic_min<float>(); } TEST(AtomicFunctionsUnittest, atomic_max_int) { test_atomic_max<int>(); } TEST(AtomicFunctionsUnittest, atomic_max_real) { test_atomic_max<double>(); test_atomic_max<float>(); }
3c56edc585f56e4653863d29607f8a194b95e565.cu
/* * GridTools * * Copyright (c) 2014-2023, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include "gtest/gtest.h" #include <cstdlib> #include <gridtools/common/atomic_functions.hpp> #include <gridtools/common/cuda_util.hpp> #include <gridtools/common/defs.hpp> template <typename T> struct verifier { static void TestEQ(T val, T exp) { T err = std::fabs(val - exp) / std::fabs(val); ASSERT_TRUE(err < 1e-12); } }; template <> struct verifier<float> { static void TestEQ(float val, float exp) { double err = std::fabs(val - exp) / std::fabs(val); ASSERT_TRUE(err < 1e-6); } }; template <> struct verifier<int> { static void TestEQ(int val, int exp) { ASSERT_EQ(val, exp); } }; template <typename T> __global__ void atomic_add_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_add(*pReduced, field[pos]); } template <typename T> __global__ void atomic_sub_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_sub(*pReduced, field[pos]); } template <typename T> __global__ void atomic_min_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_min(*pReduced, field[pos]); } template <typename T> __global__ void atomic_max_kernel(T *pReduced, const T *field, const int size) { const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int pos = j * gridDim.x * blockDim.x + i; gridtools::atomic_max(*pReduced, field[pos]); } template <typename T> void test_atomic_add() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T sumRef = 0; T sum = 0; T *sumDevice; GT_CUDA_CHECK(cudaMalloc(&sumDevice, sizeof(T))); GT_CUDA_CHECK(cudaMemcpy(sumDevice, &sum, sizeof(T), cudaMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(cudaMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); sumRef += field[cnt]; } GT_CUDA_CHECK(cudaMemcpy(fieldDevice, &field[0], sizeof(T) * size, cudaMemcpyHostToDevice)); atomic_add_kernel<<<numberOfBlocks, threadsPerBlock>>>(sumDevice, fieldDevice, size); GT_CUDA_CHECK(cudaMemcpy(&sum, sumDevice, sizeof(T), cudaMemcpyDeviceToHost)); verifier<T>::TestEQ(sumRef, sum); } template <typename T> void test_atomic_sub() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T sumRef = 0; T sum = 0; T *sumDevice; GT_CUDA_CHECK(cudaMalloc(&sumDevice, sizeof(T))); GT_CUDA_CHECK(cudaMemcpy(sumDevice, &sum, sizeof(T), cudaMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(cudaMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); sumRef -= field[cnt]; } GT_CUDA_CHECK(cudaMemcpy(fieldDevice, &field[0], sizeof(T) * size, cudaMemcpyHostToDevice)); 
atomic_sub_kernel<<<numberOfBlocks, threadsPerBlock>>>(sumDevice, fieldDevice, size); GT_CUDA_CHECK(cudaMemcpy(&sum, sumDevice, sizeof(T), cudaMemcpyDeviceToHost)); verifier<T>::TestEQ(sumRef, sum); } template <typename T> void test_atomic_min() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T minRef = 99999; T min = 99999; T *minDevice; GT_CUDA_CHECK(cudaMalloc(&minDevice, sizeof(T))); GT_CUDA_CHECK(cudaMemcpy(minDevice, &min, sizeof(T), cudaMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(cudaMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); minRef = std::min(minRef, field[cnt]); } GT_CUDA_CHECK(cudaMemcpy(fieldDevice, &field[0], sizeof(T) * size, cudaMemcpyHostToDevice)); atomic_min_kernel<<<numberOfBlocks, threadsPerBlock>>>(minDevice, fieldDevice, size); GT_CUDA_CHECK(cudaMemcpy(&min, minDevice, sizeof(T), cudaMemcpyDeviceToHost)); verifier<T>::TestEQ(minRef, min); } template <typename T> void test_atomic_max() { dim3 threadsPerBlock(4, 4); dim3 numberOfBlocks(4, 4); int size = threadsPerBlock.x * threadsPerBlock.y * numberOfBlocks.x * numberOfBlocks.y; T field[size]; T maxRef = -1; T max = -1; T *maxDevice; GT_CUDA_CHECK(cudaMalloc(&maxDevice, sizeof(T))); GT_CUDA_CHECK(cudaMemcpy(maxDevice, &max, sizeof(T), cudaMemcpyHostToDevice)); T *fieldDevice; GT_CUDA_CHECK(cudaMalloc(&fieldDevice, sizeof(T) * size)); for (int cnt = 0; cnt < size; ++cnt) { field[cnt] = static_cast<T>(std::rand() % 100 + (std::rand() % 100) * 0.005); maxRef = std::max(maxRef, field[cnt]); } GT_CUDA_CHECK(cudaMemcpy(fieldDevice, &field[0], sizeof(T) * size, cudaMemcpyHostToDevice)); atomic_max_kernel<<<numberOfBlocks, threadsPerBlock>>>(maxDevice, fieldDevice, size); GT_CUDA_CHECK(cudaMemcpy(&max, maxDevice, sizeof(T), cudaMemcpyDeviceToHost)); verifier<T>::TestEQ(maxRef, max); } TEST(AtomicFunctionsUnittest, atomic_add_int) { test_atomic_add<int>(); } TEST(AtomicFunctionsUnittest, atomic_add_real) { test_atomic_add<double>(); test_atomic_add<float>(); } TEST(AtomicFunctionsUnittest, atomic_sub_int) { test_atomic_sub<int>(); } TEST(AtomicFunctionsUnittest, atomic_sub_real) { test_atomic_sub<double>(); test_atomic_sub<float>(); } TEST(AtomicFunctionsUnittest, atomic_min_int) { test_atomic_min<int>(); } TEST(AtomicFunctionsUnittest, atomic_min_real) { test_atomic_min<double>(); test_atomic_min<float>(); } TEST(AtomicFunctionsUnittest, atomic_max_int) { test_atomic_max<int>(); } TEST(AtomicFunctionsUnittest, atomic_max_real) { test_atomic_max<double>(); test_atomic_max<float>(); }
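The tests above allocate sumDevice/fieldDevice and never free them, and they use a runtime-sized stack array for the host field. A self-contained sketch of the same reduction test with heap-backed host storage and device cleanup, shown with the native atomicAdd for int so the check is exact; it assumes the GT_CUDA_CHECK macro and verifier template from the test file above, and gridtools::atomic_add generalizes the same pattern to other types.

// Sketch only; mirrors the add test above with explicit cleanup.
#include <cstdlib>
#include <vector>

__global__ void add_kernel_sketch(int* out, const int* in) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const int j = threadIdx.y + blockIdx.y * blockDim.y;
    atomicAdd(out, in[j * gridDim.x * blockDim.x + i]);   // one atomic accumulation per thread
}

void test_atomic_add_sketch() {
    dim3 block(4, 4), grid(4, 4);
    const int size = block.x * block.y * grid.x * grid.y;
    std::vector<int> field(size);
    int sumRef = 0, sum = 0;
    for (int i = 0; i < size; ++i) { field[i] = std::rand() % 100; sumRef += field[i]; }
    int *dSum = nullptr, *dField = nullptr;
    GT_CUDA_CHECK(cudaMalloc(&dSum, sizeof(int)));
    GT_CUDA_CHECK(cudaMalloc(&dField, size * sizeof(int)));
    GT_CUDA_CHECK(cudaMemcpy(dSum, &sum, sizeof(int), cudaMemcpyHostToDevice));
    GT_CUDA_CHECK(cudaMemcpy(dField, field.data(), size * sizeof(int), cudaMemcpyHostToDevice));
    add_kernel_sketch<<<grid, block>>>(dSum, dField);
    GT_CUDA_CHECK(cudaMemcpy(&sum, dSum, sizeof(int), cudaMemcpyDeviceToHost));
    GT_CUDA_CHECK(cudaFree(dSum));
    GT_CUDA_CHECK(cudaFree(dField));          // buffers released, unlike the tests above
    verifier<int>::TestEQ(sumRef, sum);
}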
2c31c1204c0272fa7f59daf135ca19c07724abce.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include "decoderPlugin.h" #define CHECK(status) \ do \ { \ auto ret = (status); \ if (ret != 0) \ { \ std::cout << "Cuda failure: " << ret << std::endl; \ abort(); \ } \ } while (0) using namespace nvinfer1; using nvinfer1::plugin::GNMTDecoderPlugin; using nvinfer1::plugin::GNMTDecoderPluginCreator; REGISTER_TENSORRT_PLUGIN(GNMTDecoderPluginCreator); GNMTDecoderPlugin::GNMTDecoderPlugin(const PluginFieldCollection *fc) { int idx = 0; mNumLayers = *(int*)(fc->fields[idx].data); idx++; mHiddenSize = *(int*)(fc->fields[idx].data); idx++; mAttentionSize = *(int*)(fc->fields[idx].data); idx++; mBeamSize = *(int*)(fc->fields[idx].data); idx++; mDataType = *(nvinfer1::DataType*)(fc->fields[idx].data); idx++; mWeights_h = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { mWeights_h[i] = (void*)fc->fields[idx].data; idx++; } if (mDataType == DataType::kINT8) { mPostActivationScalesH_h = (float*)(fc->fields[idx].data); idx++; mPostActivationScalesY_h = (float*)(fc->fields[idx].data); idx++; mLayerGemm0ScaleInput = *(float*)(fc->fields[idx].data); idx++; mLayerGemm0ScaleAttn = *(float*)(fc->fields[idx].data); idx++; } } GNMTDecoderPlugin::GNMTDecoderPlugin(const void* data, size_t length) { const char *d = static_cast<const char*>(data), *a = d; read<int>(d, mNumLayers); read<int>(d, mHiddenSize); read<int>(d, mAttentionSize); read<int>(d, mInputSize); read<int>(d, mBeamSize); read<nvinfer1::DataType>(d, mDataType); mPostActivationScalesH_h = (float*)malloc(mNumLayers * sizeof(float)); mPostActivationScalesY_h = (float*)malloc(mNumLayers * sizeof(float)); mWeights_h = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; mWeights_h[i] = malloc(sz); memcpy(mWeights_h[i], d, sz); d += sz; } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); memcpy(mPostActivationScalesH_h, d, sz); d += sz; memcpy(mPostActivationScalesY_h, d, sz); d += sz; read<float>(d, mLayerGemm0ScaleInput); read<float>(d, mLayerGemm0ScaleAttn); } assert(d == a + length); } const char* GNMTDecoderPlugin::getPluginType() const { return "GNMTDecoderPlugin"; } const char* GNMTDecoderPlugin::getPluginVersion() const { return "1"; } void GNMTDecoderPlugin::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* GNMTDecoderPlugin::getPluginNamespace() const { return mNamespace.c_str(); } void GNMTDecoderPlugin::destroy() { if (mWeights_h) { free(mWeights_h); mWeights_h = nullptr; } if (mPostActivationScalesH_h) { free(mPostActivationScalesH_h); mPostActivationScalesH_h = nullptr; } if 
(mPostActivationScalesY_h) { free(mPostActivationScalesY_h); mPostActivationScalesY_h = nullptr; } delete this; } void GNMTDecoderPlugin::setCUDAInfo(hipStream_t mStreami, hipStream_t mStreamh, hipStream_t* mSplitKStreams, hipEvent_t* mSplitKEvents, hipblasHandle_t mCublas, cublasLtHandle_t mCublasLt, void **mWeights_d, float *mPostActivationScalesH_d, float* mPostActivationScalesY_d) { this->mStreami = mStreami; this->mStreamh = mStreamh; this->mSplitKStreams = mSplitKStreams; this->mSplitKEvents = mSplitKEvents; this->mCublas = mCublas; this->mCublasLt = mCublasLt; this->mWeights_d = mWeights_d; this->mPostActivationScalesH_d = mPostActivationScalesH_d; this->mPostActivationScalesY_d = mPostActivationScalesY_d; } IPluginV2IOExt* GNMTDecoderPlugin::clone() const { size_t sz = getSerializationSize(); char *buff = (char*)malloc(getSerializationSize()); serialize(buff); GNMTDecoderPlugin* ret = new GNMTDecoderPlugin(buff, sz); ret->setCUDAInfo(mStreami, mStreamh, mSplitKStreams, mSplitKEvents, mCublas, mCublasLt, mWeights_d, mPostActivationScalesH_d, mPostActivationScalesY_d); free(buff); return ret; } int GNMTDecoderPlugin::getNbOutputs() const { return 1 + 2 * mNumLayers; } // TODO: No idea if this needs batch size. Actually, don't know what's expected at all. Dims GNMTDecoderPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index >= 0 && index < this->getNbOutputs()); // y/hy/cy are all hiddenSize * batch. return Dims3(inputs[0].d[0], 1, mHiddenSize); } bool GNMTDecoderPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const { if (inOut[pos].format != TensorFormat::kNCHW) return false; // fp16 I/O if (mDataType == nvinfer1::DataType::kHALF) { bool allHalf = true; // Don't care about pos. If all are half pass it. // The way this is called doesn't fill all of inOut, it only fills it up to pos. 
for (int i = 0; i <= pos; i++) { if (inOut[i].type != DataType::kHALF) { allHalf = false; } } if (allHalf) { return true; } return false; } else if (mDataType == nvinfer1::DataType::kINT8) { int localPos = pos; // Inputs // x if (localPos == 0 && inOut[pos].type != DataType::kHALF) return false; // concatData else if (localPos == 1 && inOut[pos].type != DataType::kHALF) return false; // hx else if (localPos >= 2 && localPos < 2 + mNumLayers && inOut[pos].type != DataType::kHALF) return false; // cx else if (localPos >= 2 + mNumLayers && localPos < 2 + 2 * mNumLayers && inOut[pos].type != DataType::kHALF) return false; // bias else if (localPos >= 2 + 2 * mNumLayers && localPos < 2 + 3 * mNumLayers && inOut[pos].type != DataType::kFLOAT) return false; // preActivationScale else if (localPos >= 2 + 3 * mNumLayers && localPos < 2 + 4 * mNumLayers && inOut[pos].type != DataType::kFLOAT) return false; localPos -= nbInputs; // Outputs // y if (localPos == 0 && inOut[pos].type != DataType::kHALF) return false; // hy else if (localPos >= 1 && localPos < 1 + mNumLayers && inOut[pos].type != DataType::kHALF) return false; // cy else if (localPos >= 1 + mNumLayers && localPos < 1 + 2 * mNumLayers && inOut[pos].type != DataType::kHALF) return false; return true; } return false; } void GNMTDecoderPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { mInputSize = in[0].dims.d[in[0].dims.nbDims - 1]; } void GNMTDecoderPlugin::configurePlugin(const Dims *inputDims, int nbInputs, const Dims *outputDims, int nbOutputs, const DataType *inputTypes, const DataType *outputTypes, const bool *inputIsBroadcast, const bool *outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) { mInputSize = inputDims[0].d[inputDims[0].nbDims - 1]; } int GNMTDecoderPlugin::initialize() { CHECK(hipblasCreate(&mCublas)); CHECK(cublasLtCreate(&mCublasLt)); CHECK(cublasSetMathMode(mCublas, CUBLAS_TENSOR_OP_MATH)); CHECK(hipStreamCreateWithPriority(&mStreami, 0, -1)); CHECK(hipStreamCreate(&mStreamh)); mSplitKStreams = (hipStream_t*)malloc(NUM_SPLIT_K_STREAMS * sizeof(hipStream_t)); mSplitKEvents = (hipEvent_t*)malloc(NUM_SPLIT_K_STREAMS * sizeof(hipEvent_t)); for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { CHECK(hipStreamCreateWithPriority(&mSplitKStreams[i], 0, -1)); } mWeights_d = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; CHECK(hipMalloc(&mWeights_d[i], sz)); if (mDataType == DataType::kINT8) { int8_t *tmpWeights; CHECK(hipMalloc(&tmpWeights, sz)); CHECK(hipMemcpy(tmpWeights, mWeights_h[i], sz, hipMemcpyHostToDevice)); // Layer { for (int splitK = 0; splitK < 2; splitK++) { int offset = splitK * 4 * mHiddenSize * mHiddenSize * sizeof(int8_t); int n = 4 * mHiddenSize; int k = mHiddenSize; int ldb = n; cublasLtMatrixLayout_t Bdesc = NULL; cublasLtMatrixTransformDesc_t transformDesc = NULL; cublasLtMatrixLayout_t BtransformDesc = NULL; float transformBAlpha = 1.0f; float transformBBeta = 0.0f; cublasLtOrder_t colOrder = CUBLASLT_ORDER_COL; cublasLtOrder_t rowOrder = CUBLASLT_ORDER_ROW; cublasLtOrder_t order_COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; int ldbtransform = 32 * n; cublasErrCheck(cublasLtMatrixTransformDescCreate(&transformDesc, HIP_R_32F)); 
cublasErrCheck(cublasLtMatrixLayoutCreate(&Bdesc, HIP_R_8I, n, k, ldb)); cublasErrCheck(cublasLtMatrixLayoutCreate(&BtransformDesc, HIP_R_8I, n, k, ldbtransform)); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(Bdesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &colOrder, sizeof(colOrder))); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL4_4R2_8C, sizeof(order_COL4_4R2_8C))); cublasErrCheck(cublasLtMatrixTransform(mCublasLt, transformDesc, &transformBAlpha, tmpWeights + offset, Bdesc, &transformBBeta, NULL, NULL, (int8_t*)(mWeights_d[i]) + offset, BtransformDesc, 0)); CHECK(cublasLtMatrixTransformDescDestroy(transformDesc)); CHECK(cublasLtMatrixLayoutDestroy(Bdesc)); CHECK(cublasLtMatrixLayoutDestroy(BtransformDesc)); } } // Recurrent. Only thing different is the offset. { int offset = 4 * mHiddenSize * (2 * mHiddenSize) * sizeof(int8_t); int n = 4 * mHiddenSize; int k = mHiddenSize; int ldb = n; cublasLtMatrixLayout_t Bdesc = NULL; cublasLtMatrixTransformDesc_t transformDesc = NULL; cublasLtMatrixLayout_t BtransformDesc = NULL; float transformBAlpha = 1.0f; float transformBBeta = 0.0f; cublasLtOrder_t colOrder = CUBLASLT_ORDER_COL; cublasLtOrder_t rowOrder = CUBLASLT_ORDER_ROW; cublasLtOrder_t order_COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; int ldbtransform = 32 * n; cublasErrCheck(cublasLtMatrixTransformDescCreate(&transformDesc, HIP_R_32F)); cublasErrCheck(cublasLtMatrixLayoutCreate(&Bdesc, HIP_R_8I, n, k, ldb)); cublasErrCheck(cublasLtMatrixLayoutCreate(&BtransformDesc, HIP_R_8I, n, k, ldbtransform)); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(Bdesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &colOrder, sizeof(colOrder))); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL4_4R2_8C, sizeof(order_COL4_4R2_8C))); cublasErrCheck(cublasLtMatrixTransform(mCublasLt, transformDesc, &transformBAlpha, tmpWeights + offset, Bdesc, &transformBBeta, NULL, NULL, (int8_t*)(mWeights_d[i]) + offset, BtransformDesc, 0)); CHECK(cublasLtMatrixTransformDescDestroy(transformDesc)); CHECK(cublasLtMatrixLayoutDestroy(Bdesc)); CHECK(cublasLtMatrixLayoutDestroy(BtransformDesc)); } CHECK(hipFree(tmpWeights)); } else { CHECK(hipMemcpy(mWeights_d[i], mWeights_h[i], sz, hipMemcpyHostToDevice)); } } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); CHECK(hipMalloc(&mPostActivationScalesH_d, sz)); CHECK(hipMalloc(&mPostActivationScalesY_d, sz)); CHECK(hipMemcpy(mPostActivationScalesH_d, mPostActivationScalesH_h, sz, hipMemcpyHostToDevice)); CHECK(hipMemcpy(mPostActivationScalesY_d, mPostActivationScalesY_h, sz, hipMemcpyHostToDevice)); } return hipSuccess; } void GNMTDecoderPlugin::terminate() { if (mCublas) { CHECK(hipblasDestroy(mCublas)); mCublas = nullptr; } if (mStreami) { CHECK(hipStreamDestroy(mStreami)); mStreami = nullptr; } if (mStreamh) { CHECK(hipStreamDestroy(mStreamh)); mStreamh = nullptr; } for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { if (mSplitKStreams[i]) { CHECK(hipStreamDestroy(mSplitKStreams[i])); mSplitKStreams[i] = nullptr; } } if (mSplitKStreams) { free(mSplitKStreams); mSplitKStreams = nullptr; } if (mSplitKEvents) { free(mSplitKEvents); mSplitKEvents = nullptr; } if (mWeights_d) { for (int i = 0; i < mNumLayers; i++) { if (mWeights_d[i]) { hipFree(mWeights_d[i]); mWeights_d[i] = nullptr; } } free(mWeights_d); mWeights_d = nullptr; } if (mPostActivationScalesH_d) { hipFree(mPostActivationScalesH_d); mPostActivationScalesH_d = nullptr; } if (mPostActivationScalesY_d) { 
hipFree(mPostActivationScalesY_d); mPostActivationScalesY_d = nullptr; } } size_t GNMTDecoderPlugin::getWorkspaceSize(int maxBatchSize) const { size_t size = 0; if (mDataType == nvinfer1::DataType::kHALF) { // tmp_io size += mNumLayers * (mAttentionSize + mInputSize) * maxBatchSize * mBeamSize * sizeof(half); // tmp_i size += mHiddenSize * maxBatchSize * mBeamSize * 4 * NUM_SPLIT_K_STREAMS * sizeof(half); // tmp_h size += mNumLayers * mHiddenSize * maxBatchSize * mBeamSize * 4 * sizeof(half); } else if (mDataType == nvinfer1::DataType::kINT8) { int effectiveBatch = maxBatchSize * mBeamSize; int roundedBatch = roundoff(effectiveBatch, 32); // tmp_io size += mNumLayers * (mAttentionSize + mInputSize) * roundedBatch * sizeof(int8_t); // tmp_i size += mHiddenSize * roundedBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(int32_t); // tmp_h size += mNumLayers * mHiddenSize * roundedBatch * 4 * sizeof(int32_t); // tmp_resid size += mNumLayers * mHiddenSize * roundedBatch * sizeof(float); // tmp_x size += mInputSize * roundedBatch * sizeof(int8_t); // tmp_y size += mHiddenSize * roundedBatch * sizeof(int8_t); // tmp_attention size += mAttentionSize * roundedBatch * sizeof(int8_t); // tmp_attention2 size += mAttentionSize * roundedBatch * sizeof(int8_t); // tmp_h_in/out size += 2 * mNumLayers * mHiddenSize * roundedBatch * sizeof(int8_t); // tmp_i2 size += 2 * mHiddenSize * roundedBatch * sizeof(int8_t); } return size; } int GNMTDecoderPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { int effectiveBatch = batchSize * mBeamSize; assert(mAttentionSize == mHiddenSize); assert(mInputSize == mHiddenSize); void *tmp_io = NULL; void *tmp_i = NULL; void *tmp_h = NULL; void *tmp_resid = NULL; void *tmp_x = NULL; void *tmp_y = NULL; void *tmp_attention = NULL; void *tmp_attention2 = NULL; void **tmp_h_in = NULL; void **tmp_h_out = NULL; void *tmp_i2 = NULL; if (mDataType == nvinfer1::DataType::kHALF) { tmp_io = workspace; tmp_i = (void*)((char*)(workspace) + mNumLayers * (mAttentionSize + mInputSize) * effectiveBatch * sizeof(half)); tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * effectiveBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(half)); } else if (mDataType == nvinfer1::DataType::kINT8) { int roundedBatch = roundoff(effectiveBatch, 32); tmp_io = workspace; tmp_i = (void*)((char*)(workspace) + mNumLayers * (mAttentionSize + mInputSize) * roundedBatch * sizeof(int8_t)); tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * roundedBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(int32_t)); tmp_resid = (void*)((char*)(tmp_h) + mNumLayers * mHiddenSize * roundedBatch * 4 * sizeof(int32_t)); tmp_x = (void*)((char*)(tmp_resid) + mNumLayers * mHiddenSize * roundedBatch * sizeof(float)); tmp_y = (void*)((char*)(tmp_x) + mInputSize * roundedBatch * sizeof(int8_t)); tmp_attention = (void*)((char*)(tmp_y) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_attention2 = (void*)((char*)(tmp_attention) + mAttentionSize * roundedBatch * sizeof(int8_t)); tmp_h_in = (void**)malloc(mNumLayers * sizeof(void*)); tmp_h_out = (void**)malloc(mNumLayers * sizeof(void*)); tmp_h_in[0] = (void*)((char*)(tmp_attention2) + mAttentionSize * roundedBatch * sizeof(int8_t)); tmp_h_in[1] = (void*)((char*)(tmp_h_in[0]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_in[2] = (void*)((char*)(tmp_h_in[1]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_out[0] = (void*)((char*)(tmp_h_in[2]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_out[1] = (void*)((char*)(tmp_h_out[0]) + mHiddenSize 
* roundedBatch * sizeof(int8_t)); tmp_h_out[2] = (void*)((char*)(tmp_h_out[1]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_i2 = (void*)((char*)(tmp_h_out[2]) + mHiddenSize * roundedBatch * sizeof(int8_t)); } if (mDataType == nvinfer1::DataType::kINT8) { float scaleX = mLayerGemm0ScaleInput; float scaleAttn = mLayerGemm0ScaleAttn; float scale_h1 = mPostActivationScalesH_h[0]; float scale_h2 = mPostActivationScalesH_h[1]; float scale_h3 = mPostActivationScalesH_h[2]; // Quantise and transform to the shape required by the GEMM bulk5DecoderTransformAndQuantize((int8_t*)tmp_x, (half*)inputs[0], scaleX, (int8_t*)tmp_attention2, (half*)inputs[1], scaleAttn, (int8_t*)tmp_h_in[0], (half*)inputs[2], scale_h1, (int8_t*)tmp_h_in[1], (half*)inputs[3], scale_h2, (int8_t*)tmp_h_in[2], (half*)inputs[4], scale_h3, mHiddenSize, effectiveBatch, stream); } hipEvent_t event; CHECK(hipEventCreate(&event, hipEventDisableTiming)); CHECK(hipEventRecord(event, stream)); CHECK(hipStreamWaitEvent(mStreami, event, 0)); CHECK(hipStreamWaitEvent(mStreamh, event, 0)); for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { CHECK(hipStreamWaitEvent(mSplitKStreams[i], event, 0)); } CHECK(hipEventDestroy(event)); hipError_t status; int inputSize = mInputSize + mAttentionSize; if (mDataType == nvinfer1::DataType::kHALF) { decoderStep<half, HIP_R_16F, half, HIP_R_16F, half> (mHiddenSize, inputSize, effectiveBatch, 1, mNumLayers, this->mCublas, this->mCublasLt, (half*)inputs[0], // x (half**)(&(inputs[2])), // Array of hx, (half**)(&inputs[2 + mNumLayers]), // Array of cx, (half**)mWeights_d, (half**)(&inputs[2 + 2 * mNumLayers]), // bias (half*)outputs[0], // y, (half**)(&outputs[1]), // Array of hy, (half**)(&outputs[1 + mNumLayers]), // Array of cy, (half*)inputs[1], // attention, (half*)tmp_io, (half*)tmp_i, (half*)tmp_h, NULL, NULL, NULL, NULL, NULL, mStreami, mSplitKStreams, mSplitKEvents, NUM_SPLIT_K_STREAMS, mStreamh); } else if (mDataType == nvinfer1::DataType::kINT8) { decoderStep<int8_t, HIP_R_8I, int32_t, HIP_R_32I, float> (mHiddenSize, inputSize, effectiveBatch, 1, mNumLayers, this->mCublas, this->mCublasLt, (int8_t*)tmp_x, // x (int8_t**)(tmp_h_in), // Array of hx, (half**)(&inputs[2 + mNumLayers]), // Array of cx, (int8_t**)mWeights_d, (float**)(&inputs[2 + 2 * mNumLayers]), // bias (half*)outputs[0], //(int8_t*)tmp_y, // y, (half**)(&outputs[1]), // Array of hy, (half**)(&outputs[1 + mNumLayers]), // Array of cy, (int8_t*)tmp_attention2, // attention, (int8_t*)tmp_io, (int32_t*)tmp_i, (int32_t*)tmp_h, (float*)tmp_resid, (float**)(&inputs[2 + 3 * mNumLayers]), // gemm output scale, mPostActivationScalesH_d, // postActivationScaleH, mPostActivationScalesY_d, // postActivationScaleY, (int8_t*)tmp_i2, mStreami, mSplitKStreams, mSplitKEvents, NUM_SPLIT_K_STREAMS, mStreamh); } hipEvent_t eventEnd; // The final kernel is the elementwise kernel launched to stream i, so only need to wait for that one to finish. 
CHECK(hipEventCreate(&eventEnd, hipEventDisableTiming)); CHECK(hipEventRecord(eventEnd, mStreami)); CHECK(hipStreamWaitEvent(stream, eventEnd, 0)); CHECK(hipEventDestroy(eventEnd)); if (mDataType == nvinfer1::DataType::kINT8) { free(tmp_h_in); free(tmp_h_out); } return 0; } size_t GNMTDecoderPlugin::getSerializationSize() const { size_t sz = sizeof(mNumLayers) + sizeof(mHiddenSize) + sizeof(mAttentionSize) + sizeof(mInputSize) + sizeof(mBeamSize) + sizeof(mDataType); // Weights for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } sz += 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; } // Scales if (mDataType == DataType::kINT8) { sz += mNumLayers * sizeof(float); sz += mNumLayers * sizeof(float); sz += sizeof(float); sz += sizeof(float); } return sz; } void GNMTDecoderPlugin::serialize(void* buffer) const { char *d = static_cast<char*>(buffer), *a = d; write<int>(d, mNumLayers); write<int>(d, mHiddenSize); write<int>(d, mAttentionSize); write<int>(d, mInputSize); write<int>(d, mBeamSize); write<nvinfer1::DataType>(d, mDataType); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; memcpy(d, mWeights_h[i], sz); d += sz; } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); memcpy(d, mPostActivationScalesH_h, sz); d += sz; memcpy(d, mPostActivationScalesY_h, sz); d += sz; write<float>(d, mLayerGemm0ScaleInput); write<float>(d, mLayerGemm0ScaleAttn); } assert(d == a + getSerializationSize()); } nvinfer1::DataType GNMTDecoderPlugin::getOutputDataType (int index, const nvinfer1::DataType *inputTypes, int nbInputs) const { return mDataType == nvinfer1::DataType::kINT8 ? nvinfer1::DataType::kHALF : mDataType; } bool GNMTDecoderPlugin::isOutputBroadcastAcrossBatch (int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const { return false; } bool GNMTDecoderPlugin::canBroadcastInputAcrossBatch (int inputIndex) const { return inputIndex >= 2 * mNumLayers + 2; } template <typename T> void GNMTDecoderPlugin::write(char*& buffer, const T& val) const { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template <typename T> void GNMTDecoderPlugin::read(const char*& buffer, T& val) const { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } const char* GNMTDecoderPluginCreator::getPluginName() const { return "GNMTDecoderPlugin"; } const char* GNMTDecoderPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* GNMTDecoderPluginCreator::getFieldNames() { return nullptr; } void GNMTDecoderPluginCreator::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* GNMTDecoderPluginCreator::getPluginNamespace() const { return mNamespace.c_str(); } IPluginV2IOExt* GNMTDecoderPluginCreator::createPlugin(const char *name, const PluginFieldCollection *fc) { return new GNMTDecoderPlugin(fc); } IPluginV2IOExt* GNMTDecoderPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) { return new GNMTDecoderPlugin(serialData, serialLength); }
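The int8 path of enqueue() above carves its workspace with a chain of casts and hand-computed offsets. A small bump-allocator is one way to keep that carving readable; this is an illustrative sketch only, and the byte counts must stay in sync with getWorkspaceSize().

// Sketch of a workspace carver (assumption: helper name and structure are ours).
struct WorkspaceCarver {
    char* p;
    explicit WorkspaceCarver(void* base) : p(static_cast<char*>(base)) {}
    void* take(size_t bytes) { void* r = p; p += bytes; return r; }   // hand out the next sub-buffer
};

// Usage inside the int8 branch of enqueue(), with roundedBatch = roundoff(effectiveBatch, 32):
//   WorkspaceCarver ws(workspace);
//   void* tmp_io = ws.take(mNumLayers * (mAttentionSize + mInputSize) * roundedBatch * sizeof(int8_t));
//   void* tmp_i  = ws.take(mHiddenSize * roundedBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(int32_t));
//   ... and so on for tmp_h, tmp_resid, tmp_x, tmp_y, tmp_attention, tmp_attention2,
//       the per-layer tmp_h_in/tmp_h_out slots and tmp_i2.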
2c31c1204c0272fa7f59daf135ca19c07724abce.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda.h> #include "decoderPlugin.h" #define CHECK(status) \ do \ { \ auto ret = (status); \ if (ret != 0) \ { \ std::cout << "Cuda failure: " << ret << std::endl; \ abort(); \ } \ } while (0) using namespace nvinfer1; using nvinfer1::plugin::GNMTDecoderPlugin; using nvinfer1::plugin::GNMTDecoderPluginCreator; REGISTER_TENSORRT_PLUGIN(GNMTDecoderPluginCreator); GNMTDecoderPlugin::GNMTDecoderPlugin(const PluginFieldCollection *fc) { int idx = 0; mNumLayers = *(int*)(fc->fields[idx].data); idx++; mHiddenSize = *(int*)(fc->fields[idx].data); idx++; mAttentionSize = *(int*)(fc->fields[idx].data); idx++; mBeamSize = *(int*)(fc->fields[idx].data); idx++; mDataType = *(nvinfer1::DataType*)(fc->fields[idx].data); idx++; mWeights_h = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { mWeights_h[i] = (void*)fc->fields[idx].data; idx++; } if (mDataType == DataType::kINT8) { mPostActivationScalesH_h = (float*)(fc->fields[idx].data); idx++; mPostActivationScalesY_h = (float*)(fc->fields[idx].data); idx++; mLayerGemm0ScaleInput = *(float*)(fc->fields[idx].data); idx++; mLayerGemm0ScaleAttn = *(float*)(fc->fields[idx].data); idx++; } } GNMTDecoderPlugin::GNMTDecoderPlugin(const void* data, size_t length) { const char *d = static_cast<const char*>(data), *a = d; read<int>(d, mNumLayers); read<int>(d, mHiddenSize); read<int>(d, mAttentionSize); read<int>(d, mInputSize); read<int>(d, mBeamSize); read<nvinfer1::DataType>(d, mDataType); mPostActivationScalesH_h = (float*)malloc(mNumLayers * sizeof(float)); mPostActivationScalesY_h = (float*)malloc(mNumLayers * sizeof(float)); mWeights_h = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; mWeights_h[i] = malloc(sz); memcpy(mWeights_h[i], d, sz); d += sz; } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); memcpy(mPostActivationScalesH_h, d, sz); d += sz; memcpy(mPostActivationScalesY_h, d, sz); d += sz; read<float>(d, mLayerGemm0ScaleInput); read<float>(d, mLayerGemm0ScaleAttn); } assert(d == a + length); } const char* GNMTDecoderPlugin::getPluginType() const { return "GNMTDecoderPlugin"; } const char* GNMTDecoderPlugin::getPluginVersion() const { return "1"; } void GNMTDecoderPlugin::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* GNMTDecoderPlugin::getPluginNamespace() const { return mNamespace.c_str(); } void GNMTDecoderPlugin::destroy() { if (mWeights_h) { free(mWeights_h); mWeights_h = nullptr; } if (mPostActivationScalesH_h) { free(mPostActivationScalesH_h); mPostActivationScalesH_h = nullptr; } if (mPostActivationScalesY_h) { free(mPostActivationScalesY_h); 
mPostActivationScalesY_h = nullptr; } delete this; } void GNMTDecoderPlugin::setCUDAInfo(cudaStream_t mStreami, cudaStream_t mStreamh, cudaStream_t* mSplitKStreams, cudaEvent_t* mSplitKEvents, cublasHandle_t mCublas, cublasLtHandle_t mCublasLt, void **mWeights_d, float *mPostActivationScalesH_d, float* mPostActivationScalesY_d) { this->mStreami = mStreami; this->mStreamh = mStreamh; this->mSplitKStreams = mSplitKStreams; this->mSplitKEvents = mSplitKEvents; this->mCublas = mCublas; this->mCublasLt = mCublasLt; this->mWeights_d = mWeights_d; this->mPostActivationScalesH_d = mPostActivationScalesH_d; this->mPostActivationScalesY_d = mPostActivationScalesY_d; } IPluginV2IOExt* GNMTDecoderPlugin::clone() const { size_t sz = getSerializationSize(); char *buff = (char*)malloc(getSerializationSize()); serialize(buff); GNMTDecoderPlugin* ret = new GNMTDecoderPlugin(buff, sz); ret->setCUDAInfo(mStreami, mStreamh, mSplitKStreams, mSplitKEvents, mCublas, mCublasLt, mWeights_d, mPostActivationScalesH_d, mPostActivationScalesY_d); free(buff); return ret; } int GNMTDecoderPlugin::getNbOutputs() const { return 1 + 2 * mNumLayers; } // TODO: No idea if this needs batch size. Actually, don't know what's expected at all. Dims GNMTDecoderPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index >= 0 && index < this->getNbOutputs()); // y/hy/cy are all hiddenSize * batch. return Dims3(inputs[0].d[0], 1, mHiddenSize); } bool GNMTDecoderPlugin::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const { if (inOut[pos].format != TensorFormat::kNCHW) return false; // fp16 I/O if (mDataType == nvinfer1::DataType::kHALF) { bool allHalf = true; // Don't care about pos. If all are half pass it. // The way this is called doesn't fill all of inOut, it only fills it up to pos. 
for (int i = 0; i <= pos; i++) { if (inOut[i].type != DataType::kHALF) { allHalf = false; } } if (allHalf) { return true; } return false; } else if (mDataType == nvinfer1::DataType::kINT8) { int localPos = pos; // Inputs // x if (localPos == 0 && inOut[pos].type != DataType::kHALF) return false; // concatData else if (localPos == 1 && inOut[pos].type != DataType::kHALF) return false; // hx else if (localPos >= 2 && localPos < 2 + mNumLayers && inOut[pos].type != DataType::kHALF) return false; // cx else if (localPos >= 2 + mNumLayers && localPos < 2 + 2 * mNumLayers && inOut[pos].type != DataType::kHALF) return false; // bias else if (localPos >= 2 + 2 * mNumLayers && localPos < 2 + 3 * mNumLayers && inOut[pos].type != DataType::kFLOAT) return false; // preActivationScale else if (localPos >= 2 + 3 * mNumLayers && localPos < 2 + 4 * mNumLayers && inOut[pos].type != DataType::kFLOAT) return false; localPos -= nbInputs; // Outputs // y if (localPos == 0 && inOut[pos].type != DataType::kHALF) return false; // hy else if (localPos >= 1 && localPos < 1 + mNumLayers && inOut[pos].type != DataType::kHALF) return false; // cy else if (localPos >= 1 + mNumLayers && localPos < 1 + 2 * mNumLayers && inOut[pos].type != DataType::kHALF) return false; return true; } return false; } void GNMTDecoderPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { mInputSize = in[0].dims.d[in[0].dims.nbDims - 1]; } void GNMTDecoderPlugin::configurePlugin(const Dims *inputDims, int nbInputs, const Dims *outputDims, int nbOutputs, const DataType *inputTypes, const DataType *outputTypes, const bool *inputIsBroadcast, const bool *outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize) { mInputSize = inputDims[0].d[inputDims[0].nbDims - 1]; } int GNMTDecoderPlugin::initialize() { CHECK(cublasCreate(&mCublas)); CHECK(cublasLtCreate(&mCublasLt)); CHECK(cublasSetMathMode(mCublas, CUBLAS_TENSOR_OP_MATH)); CHECK(cudaStreamCreateWithPriority(&mStreami, 0, -1)); CHECK(cudaStreamCreate(&mStreamh)); mSplitKStreams = (cudaStream_t*)malloc(NUM_SPLIT_K_STREAMS * sizeof(cudaStream_t)); mSplitKEvents = (cudaEvent_t*)malloc(NUM_SPLIT_K_STREAMS * sizeof(cudaEvent_t)); for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { CHECK(cudaStreamCreateWithPriority(&mSplitKStreams[i], 0, -1)); } mWeights_d = (void**)malloc(mNumLayers * sizeof(void*)); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; CHECK(cudaMalloc(&mWeights_d[i], sz)); if (mDataType == DataType::kINT8) { int8_t *tmpWeights; CHECK(cudaMalloc(&tmpWeights, sz)); CHECK(cudaMemcpy(tmpWeights, mWeights_h[i], sz, cudaMemcpyHostToDevice)); // Layer { for (int splitK = 0; splitK < 2; splitK++) { int offset = splitK * 4 * mHiddenSize * mHiddenSize * sizeof(int8_t); int n = 4 * mHiddenSize; int k = mHiddenSize; int ldb = n; cublasLtMatrixLayout_t Bdesc = NULL; cublasLtMatrixTransformDesc_t transformDesc = NULL; cublasLtMatrixLayout_t BtransformDesc = NULL; float transformBAlpha = 1.0f; float transformBBeta = 0.0f; cublasLtOrder_t colOrder = CUBLASLT_ORDER_COL; cublasLtOrder_t rowOrder = CUBLASLT_ORDER_ROW; cublasLtOrder_t order_COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; int ldbtransform = 32 * n; cublasErrCheck(cublasLtMatrixTransformDescCreate(&transformDesc, CUDA_R_32F)); 
cublasErrCheck(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_8I, n, k, ldb)); cublasErrCheck(cublasLtMatrixLayoutCreate(&BtransformDesc, CUDA_R_8I, n, k, ldbtransform)); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(Bdesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &colOrder, sizeof(colOrder))); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL4_4R2_8C, sizeof(order_COL4_4R2_8C))); cublasErrCheck(cublasLtMatrixTransform(mCublasLt, transformDesc, &transformBAlpha, tmpWeights + offset, Bdesc, &transformBBeta, NULL, NULL, (int8_t*)(mWeights_d[i]) + offset, BtransformDesc, 0)); CHECK(cublasLtMatrixTransformDescDestroy(transformDesc)); CHECK(cublasLtMatrixLayoutDestroy(Bdesc)); CHECK(cublasLtMatrixLayoutDestroy(BtransformDesc)); } } // Recurrent. Only thing different is the offset. { int offset = 4 * mHiddenSize * (2 * mHiddenSize) * sizeof(int8_t); int n = 4 * mHiddenSize; int k = mHiddenSize; int ldb = n; cublasLtMatrixLayout_t Bdesc = NULL; cublasLtMatrixTransformDesc_t transformDesc = NULL; cublasLtMatrixLayout_t BtransformDesc = NULL; float transformBAlpha = 1.0f; float transformBBeta = 0.0f; cublasLtOrder_t colOrder = CUBLASLT_ORDER_COL; cublasLtOrder_t rowOrder = CUBLASLT_ORDER_ROW; cublasLtOrder_t order_COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; int ldbtransform = 32 * n; cublasErrCheck(cublasLtMatrixTransformDescCreate(&transformDesc, CUDA_R_32F)); cublasErrCheck(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_8I, n, k, ldb)); cublasErrCheck(cublasLtMatrixLayoutCreate(&BtransformDesc, CUDA_R_8I, n, k, ldbtransform)); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(Bdesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &colOrder, sizeof(colOrder))); cublasErrCheck(cublasLtMatrixLayoutSetAttribute(BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL4_4R2_8C, sizeof(order_COL4_4R2_8C))); cublasErrCheck(cublasLtMatrixTransform(mCublasLt, transformDesc, &transformBAlpha, tmpWeights + offset, Bdesc, &transformBBeta, NULL, NULL, (int8_t*)(mWeights_d[i]) + offset, BtransformDesc, 0)); CHECK(cublasLtMatrixTransformDescDestroy(transformDesc)); CHECK(cublasLtMatrixLayoutDestroy(Bdesc)); CHECK(cublasLtMatrixLayoutDestroy(BtransformDesc)); } CHECK(cudaFree(tmpWeights)); } else { CHECK(cudaMemcpy(mWeights_d[i], mWeights_h[i], sz, cudaMemcpyHostToDevice)); } } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); CHECK(cudaMalloc(&mPostActivationScalesH_d, sz)); CHECK(cudaMalloc(&mPostActivationScalesY_d, sz)); CHECK(cudaMemcpy(mPostActivationScalesH_d, mPostActivationScalesH_h, sz, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(mPostActivationScalesY_d, mPostActivationScalesY_h, sz, cudaMemcpyHostToDevice)); } return cudaSuccess; } void GNMTDecoderPlugin::terminate() { if (mCublas) { CHECK(cublasDestroy(mCublas)); mCublas = nullptr; } if (mStreami) { CHECK(cudaStreamDestroy(mStreami)); mStreami = nullptr; } if (mStreamh) { CHECK(cudaStreamDestroy(mStreamh)); mStreamh = nullptr; } for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { if (mSplitKStreams[i]) { CHECK(cudaStreamDestroy(mSplitKStreams[i])); mSplitKStreams[i] = nullptr; } } if (mSplitKStreams) { free(mSplitKStreams); mSplitKStreams = nullptr; } if (mSplitKEvents) { free(mSplitKEvents); mSplitKEvents = nullptr; } if (mWeights_d) { for (int i = 0; i < mNumLayers; i++) { if (mWeights_d[i]) { cudaFree(mWeights_d[i]); mWeights_d[i] = nullptr; } } free(mWeights_d); mWeights_d = nullptr; } if (mPostActivationScalesH_d) { cudaFree(mPostActivationScalesH_d); mPostActivationScalesH_d = nullptr; } if 
(mPostActivationScalesY_d) { cudaFree(mPostActivationScalesY_d); mPostActivationScalesY_d = nullptr; } } size_t GNMTDecoderPlugin::getWorkspaceSize(int maxBatchSize) const { size_t size = 0; if (mDataType == nvinfer1::DataType::kHALF) { // tmp_io size += mNumLayers * (mAttentionSize + mInputSize) * maxBatchSize * mBeamSize * sizeof(half); // tmp_i size += mHiddenSize * maxBatchSize * mBeamSize * 4 * NUM_SPLIT_K_STREAMS * sizeof(half); // tmp_h size += mNumLayers * mHiddenSize * maxBatchSize * mBeamSize * 4 * sizeof(half); } else if (mDataType == nvinfer1::DataType::kINT8) { int effectiveBatch = maxBatchSize * mBeamSize; int roundedBatch = roundoff(effectiveBatch, 32); // tmp_io size += mNumLayers * (mAttentionSize + mInputSize) * roundedBatch * sizeof(int8_t); // tmp_i size += mHiddenSize * roundedBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(int32_t); // tmp_h size += mNumLayers * mHiddenSize * roundedBatch * 4 * sizeof(int32_t); // tmp_resid size += mNumLayers * mHiddenSize * roundedBatch * sizeof(float); // tmp_x size += mInputSize * roundedBatch * sizeof(int8_t); // tmp_y size += mHiddenSize * roundedBatch * sizeof(int8_t); // tmp_attention size += mAttentionSize * roundedBatch * sizeof(int8_t); // tmp_attention2 size += mAttentionSize * roundedBatch * sizeof(int8_t); // tmp_h_in/out size += 2 * mNumLayers * mHiddenSize * roundedBatch * sizeof(int8_t); // tmp_i2 size += 2 * mHiddenSize * roundedBatch * sizeof(int8_t); } return size; } int GNMTDecoderPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { int effectiveBatch = batchSize * mBeamSize; assert(mAttentionSize == mHiddenSize); assert(mInputSize == mHiddenSize); void *tmp_io = NULL; void *tmp_i = NULL; void *tmp_h = NULL; void *tmp_resid = NULL; void *tmp_x = NULL; void *tmp_y = NULL; void *tmp_attention = NULL; void *tmp_attention2 = NULL; void **tmp_h_in = NULL; void **tmp_h_out = NULL; void *tmp_i2 = NULL; if (mDataType == nvinfer1::DataType::kHALF) { tmp_io = workspace; tmp_i = (void*)((char*)(workspace) + mNumLayers * (mAttentionSize + mInputSize) * effectiveBatch * sizeof(half)); tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * effectiveBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(half)); } else if (mDataType == nvinfer1::DataType::kINT8) { int roundedBatch = roundoff(effectiveBatch, 32); tmp_io = workspace; tmp_i = (void*)((char*)(workspace) + mNumLayers * (mAttentionSize + mInputSize) * roundedBatch * sizeof(int8_t)); tmp_h = (void*)((char*)(tmp_i) + mHiddenSize * roundedBatch * 4 * NUM_SPLIT_K_STREAMS * sizeof(int32_t)); tmp_resid = (void*)((char*)(tmp_h) + mNumLayers * mHiddenSize * roundedBatch * 4 * sizeof(int32_t)); tmp_x = (void*)((char*)(tmp_resid) + mNumLayers * mHiddenSize * roundedBatch * sizeof(float)); tmp_y = (void*)((char*)(tmp_x) + mInputSize * roundedBatch * sizeof(int8_t)); tmp_attention = (void*)((char*)(tmp_y) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_attention2 = (void*)((char*)(tmp_attention) + mAttentionSize * roundedBatch * sizeof(int8_t)); tmp_h_in = (void**)malloc(mNumLayers * sizeof(void*)); tmp_h_out = (void**)malloc(mNumLayers * sizeof(void*)); tmp_h_in[0] = (void*)((char*)(tmp_attention2) + mAttentionSize * roundedBatch * sizeof(int8_t)); tmp_h_in[1] = (void*)((char*)(tmp_h_in[0]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_in[2] = (void*)((char*)(tmp_h_in[1]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_out[0] = (void*)((char*)(tmp_h_in[2]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_out[1] = 
(void*)((char*)(tmp_h_out[0]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_h_out[2] = (void*)((char*)(tmp_h_out[1]) + mHiddenSize * roundedBatch * sizeof(int8_t)); tmp_i2 = (void*)((char*)(tmp_h_out[2]) + mHiddenSize * roundedBatch * sizeof(int8_t)); } if (mDataType == nvinfer1::DataType::kINT8) { float scaleX = mLayerGemm0ScaleInput; float scaleAttn = mLayerGemm0ScaleAttn; float scale_h1 = mPostActivationScalesH_h[0]; float scale_h2 = mPostActivationScalesH_h[1]; float scale_h3 = mPostActivationScalesH_h[2]; // Quantise and transform to the shape required by the GEMM bulk5DecoderTransformAndQuantize((int8_t*)tmp_x, (half*)inputs[0], scaleX, (int8_t*)tmp_attention2, (half*)inputs[1], scaleAttn, (int8_t*)tmp_h_in[0], (half*)inputs[2], scale_h1, (int8_t*)tmp_h_in[1], (half*)inputs[3], scale_h2, (int8_t*)tmp_h_in[2], (half*)inputs[4], scale_h3, mHiddenSize, effectiveBatch, stream); } cudaEvent_t event; CHECK(cudaEventCreate(&event, cudaEventDisableTiming)); CHECK(cudaEventRecord(event, stream)); CHECK(cudaStreamWaitEvent(mStreami, event, 0)); CHECK(cudaStreamWaitEvent(mStreamh, event, 0)); for (int i = 0; i < NUM_SPLIT_K_STREAMS; i++) { CHECK(cudaStreamWaitEvent(mSplitKStreams[i], event, 0)); } CHECK(cudaEventDestroy(event)); cudaError_t status; int inputSize = mInputSize + mAttentionSize; if (mDataType == nvinfer1::DataType::kHALF) { decoderStep<half, CUDA_R_16F, half, CUDA_R_16F, half> (mHiddenSize, inputSize, effectiveBatch, 1, mNumLayers, this->mCublas, this->mCublasLt, (half*)inputs[0], // x (half**)(&(inputs[2])), // Array of hx, (half**)(&inputs[2 + mNumLayers]), // Array of cx, (half**)mWeights_d, (half**)(&inputs[2 + 2 * mNumLayers]), // bias (half*)outputs[0], // y, (half**)(&outputs[1]), // Array of hy, (half**)(&outputs[1 + mNumLayers]), // Array of cy, (half*)inputs[1], // attention, (half*)tmp_io, (half*)tmp_i, (half*)tmp_h, NULL, NULL, NULL, NULL, NULL, mStreami, mSplitKStreams, mSplitKEvents, NUM_SPLIT_K_STREAMS, mStreamh); } else if (mDataType == nvinfer1::DataType::kINT8) { decoderStep<int8_t, CUDA_R_8I, int32_t, CUDA_R_32I, float> (mHiddenSize, inputSize, effectiveBatch, 1, mNumLayers, this->mCublas, this->mCublasLt, (int8_t*)tmp_x, // x (int8_t**)(tmp_h_in), // Array of hx, (half**)(&inputs[2 + mNumLayers]), // Array of cx, (int8_t**)mWeights_d, (float**)(&inputs[2 + 2 * mNumLayers]), // bias (half*)outputs[0], //(int8_t*)tmp_y, // y, (half**)(&outputs[1]), // Array of hy, (half**)(&outputs[1 + mNumLayers]), // Array of cy, (int8_t*)tmp_attention2, // attention, (int8_t*)tmp_io, (int32_t*)tmp_i, (int32_t*)tmp_h, (float*)tmp_resid, (float**)(&inputs[2 + 3 * mNumLayers]), // gemm output scale, mPostActivationScalesH_d, // postActivationScaleH, mPostActivationScalesY_d, // postActivationScaleY, (int8_t*)tmp_i2, mStreami, mSplitKStreams, mSplitKEvents, NUM_SPLIT_K_STREAMS, mStreamh); } cudaEvent_t eventEnd; // The final kernel is the elementwise kernel launched to stream i, so only need to wait for that one to finish. 
CHECK(cudaEventCreate(&eventEnd, cudaEventDisableTiming)); CHECK(cudaEventRecord(eventEnd, mStreami)); CHECK(cudaStreamWaitEvent(stream, eventEnd, 0)); CHECK(cudaEventDestroy(eventEnd)); if (mDataType == nvinfer1::DataType::kINT8) { free(tmp_h_in); free(tmp_h_out); } return 0; } size_t GNMTDecoderPlugin::getSerializationSize() const { size_t sz = sizeof(mNumLayers) + sizeof(mHiddenSize) + sizeof(mAttentionSize) + sizeof(mInputSize) + sizeof(mBeamSize) + sizeof(mDataType); // Weights for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } sz += 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; } // Scales if (mDataType == DataType::kINT8) { sz += mNumLayers * sizeof(float); sz += mNumLayers * sizeof(float); sz += sizeof(float); sz += sizeof(float); } return sz; } void GNMTDecoderPlugin::serialize(void* buffer) const { char *d = static_cast<char*>(buffer), *a = d; write<int>(d, mNumLayers); write<int>(d, mHiddenSize); write<int>(d, mAttentionSize); write<int>(d, mInputSize); write<int>(d, mBeamSize); write<nvinfer1::DataType>(d, mDataType); for (int i = 0; i < mNumLayers; i++) { size_t dataTypeSize = 0; if (mDataType == DataType::kHALF) { dataTypeSize = sizeof(half); } else if (mDataType == DataType::kINT8) { dataTypeSize = sizeof(int8_t); } size_t sz = 4 * mHiddenSize * (mAttentionSize + 2 * mHiddenSize) * dataTypeSize; memcpy(d, mWeights_h[i], sz); d += sz; } if (mDataType == DataType::kINT8) { size_t sz = mNumLayers * sizeof(float); memcpy(d, mPostActivationScalesH_h, sz); d += sz; memcpy(d, mPostActivationScalesY_h, sz); d += sz; write<float>(d, mLayerGemm0ScaleInput); write<float>(d, mLayerGemm0ScaleAttn); } assert(d == a + getSerializationSize()); } nvinfer1::DataType GNMTDecoderPlugin::getOutputDataType (int index, const nvinfer1::DataType *inputTypes, int nbInputs) const { return mDataType == nvinfer1::DataType::kINT8 ? nvinfer1::DataType::kHALF : mDataType; } bool GNMTDecoderPlugin::isOutputBroadcastAcrossBatch (int outputIndex, const bool *inputIsBroadcasted, int nbInputs) const { return false; } bool GNMTDecoderPlugin::canBroadcastInputAcrossBatch (int inputIndex) const { return inputIndex >= 2 * mNumLayers + 2; } template <typename T> void GNMTDecoderPlugin::write(char*& buffer, const T& val) const { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template <typename T> void GNMTDecoderPlugin::read(const char*& buffer, T& val) const { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } const char* GNMTDecoderPluginCreator::getPluginName() const { return "GNMTDecoderPlugin"; } const char* GNMTDecoderPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* GNMTDecoderPluginCreator::getFieldNames() { return nullptr; } void GNMTDecoderPluginCreator::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* GNMTDecoderPluginCreator::getPluginNamespace() const { return mNamespace.c_str(); } IPluginV2IOExt* GNMTDecoderPluginCreator::createPlugin(const char *name, const PluginFieldCollection *fc) { return new GNMTDecoderPlugin(fc); } IPluginV2IOExt* GNMTDecoderPluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) { return new GNMTDecoderPlugin(serialData, serialLength); }
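// --- A minimal, standalone sketch (not part of the plugin above) of the INT8 workspace pattern it
// uses: the effective batch is rounded up to a multiple of 32 before sizing the temporary buffers,
// and one flat workspace is then carved into per-tensor regions by pointer arithmetic, as
// getWorkspaceSize()/enqueue() do. The roundoff() semantics, sizes and buffer names below are
// illustrative assumptions, not the plugin's actual values.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Assumed behaviour of the helper used by the plugin: round value up to a multiple of divisor.
static inline int roundoff(int value, int divisor)
{
    return ((value + divisor - 1) / divisor) * divisor;
}

int main()
{
    const int hiddenSize = 1024;
    const int batchSize = 5;
    const int beamSize = 4;
    const int effectiveBatch = batchSize * beamSize;        // 20
    const int roundedBatch = roundoff(effectiveBatch, 32);  // 32: batch is padded before sizing INT8 buffers

    // Size the flat workspace first, remembering each region's byte offset...
    size_t bytes = 0;
    const size_t off_x = bytes; bytes += hiddenSize * roundedBatch * sizeof(int8_t);       // e.g. tmp_x
    const size_t off_i = bytes; bytes += 4 * hiddenSize * roundedBatch * sizeof(int32_t);  // e.g. tmp_i

    // ...then carve the regions out of a single allocation with pointer arithmetic.
    char* workspace = new char[bytes];
    int8_t*  tmp_x = reinterpret_cast<int8_t*>(workspace + off_x);
    int32_t* tmp_i = reinterpret_cast<int32_t*>(workspace + off_i);

    assert(roundedBatch % 32 == 0);
    printf("workspace bytes = %zu, tmp_x = %p, tmp_i = %p\n",
           bytes, static_cast<void*>(tmp_x), static_cast<void*>(tmp_i));

    delete[] workspace;
    return 0;
}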
25f06aeca66415f426ed909396ca63e5744ae1ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/reciprocal_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void ReciprocalGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i)); #else dX[i] = dY[i] * (-Y[i] * Y[i]); #endif } } } // namespace template <> template <typename T> bool ReciprocalGradientFunctor<CUDAContext>::Forward( const std::vector<int>& Y_dims, const std::vector<int>& /* dY_dims */, const T* Y, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( ReciprocalGradientCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, dY, Y, dX); return true; } REGISTER_CUDA_OPERATOR( Reciprocal, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, ReciprocalFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( ReciprocalGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, ReciprocalGradientFunctor<CUDAContext>>); } // namespace caffe2
25f06aeca66415f426ed909396ca63e5744ae1ee.cu
#include "caffe2/operators/reciprocal_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void ReciprocalGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i)); #else dX[i] = dY[i] * (-Y[i] * Y[i]); #endif } } } // namespace template <> template <typename T> bool ReciprocalGradientFunctor<CUDAContext>::Forward( const std::vector<int>& Y_dims, const std::vector<int>& /* dY_dims */, const T* Y, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>()); ReciprocalGradientCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dY, Y, dX); return true; } REGISTER_CUDA_OPERATOR( Reciprocal, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, ReciprocalFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( ReciprocalGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, ReciprocalGradientFunctor<CUDAContext>>); } // namespace caffe2
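// --- Standalone numerical check (illustrative only, not part of the Caffe2 operator above) of the
// identity the backward kernel relies on: with Y = 1/X we have dY/dX = -1/X^2 = -Y^2, hence
// dX = dY * (-Y * Y), which is exactly the per-element expression in ReciprocalGradientCUDAKernel.
// Plain host-side C++, no Caffe2 or CUDA dependency.
#include <cmath>
#include <cstdio>

int main()
{
    const float x = 2.5f, dy = 0.75f, eps = 1e-3f;
    const float y = 1.0f / x;
    const float analytic = dy * (-y * y);  // formula used by the kernel
    const float numeric  = dy * ((1.0f / (x + eps) - 1.0f / (x - eps)) / (2.0f * eps));
    printf("analytic = %f, numeric = %f, abs diff = %e\n",
           analytic, numeric, std::fabs(analytic - numeric));
    return 0;
}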
ac91954f055b3ee212d90bfdaa62e010c097ed4b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + 
(unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); 
f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_80_20_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), 
dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
ac91954f055b3ee212d90bfdaa62e010c097ed4b.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + 
(unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); 
f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_80_20_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, 
num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
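// --- Minimal sketch of the pointer-chasing idea the benchmark above is built on (illustrative
// only, not part of the original file; the stride, chain length and launch shape are arbitrary
// example values). Each element stores the index of the next one, so every load depends on the
// previous load and the loads cannot overlap -- which is what exposes memory latency.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void chase(const unsigned int* next, unsigned int start, int iters, unsigned int* out)
{
    unsigned int p = start;
    for (int i = 0; i < iters; ++i) {
        p = next[p];  // serialized, latency-bound loads
    }
    *out = p;         // keep the result live so the loop is not optimized away
}

int main()
{
    const int n = 1 << 16, stride = 64, iters = 100000;
    unsigned int* h_next = new unsigned int[n];
    for (int i = 0; i < n; ++i) {
        h_next[i] = (i + stride) % n;  // fixed-stride chain through the array
    }

    unsigned int *d_next, *d_out;
    cudaMalloc((void**)&d_next, n * sizeof(unsigned int));
    cudaMalloc((void**)&d_out, sizeof(unsigned int));
    cudaMemcpy(d_next, h_next, n * sizeof(unsigned int), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    chase<<<1, 1>>>(d_next, 0, iters, d_out);  // single thread: pure latency, nothing to overlap with
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("%d dependent loads in %f ms (~%f ns per load)\n", iters, ms, 1e6f * ms / iters);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_next);
    cudaFree(d_out);
    delete[] h_next;
    return 0;
}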
f2f8e8c373fb9b0244c9292b4391a6955420bd39.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 20 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); //checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernal), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
f2f8e8c373fb9b0244c9292b4391a6955420bd39.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 20 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); //checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); 
printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
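// --- Note on RandomInit() above (both the .hip and .cu versions): rand() / RAND_MAX is an integer
// division, so the cast yields 0 for every call except the rare case rand() == RAND_MAX, and the
// array is effectively zero-filled despite the "random float entries" comment. Whether that
// matters depends on how the REPEAT_* macro consumes the data, but a variant that really produces
// random values would look like the standalone sketch below (illustrative, not the original code).
#include <cstdlib>

// Fills data[0..n) with pseudo-random integers in [0, maxValue).
void RandomInitInts(int* data, int n, int maxValue)
{
    for (int i = 0; i < n; ++i) {
        data[i] = rand() % maxValue;
    }
}

// Float-valued variant matching the original comment: uniform values in [0, 1].
void RandomInitFloats(float* data, int n)
{
    for (int i = 0; i < n; ++i) {
        data[i] = static_cast<float>(rand()) / RAND_MAX;
    }
}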
afd5d3ba3993a448eb3c1a8a13a0f4f154b88bdd.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=32 --gridDim=2 #include <hip/hip_runtime.h> __device__ void write(int* A, int idx, int temp) { A[idx] = temp; } __device__ int read(int* A, int idx) { return A[idx + 1]; } __global__ void race (int* A) { int tid = threadIdx.x; int bid = blockIdx.x; int idx = blockDim.x * bid + tid; int temp = read(A, idx); write(A, idx, temp); }
afd5d3ba3993a448eb3c1a8a13a0f4f154b88bdd.cu
//pass //--blockDim=32 --gridDim=2 #include <cuda.h> __device__ void write(int* A, int idx, int temp) { A[idx] = temp; } __device__ int read(int* A, int idx) { return A[idx + 1]; } __global__ void race (int* A) { int tid = threadIdx.x; int bid = blockIdx.x; int idx = blockDim.x * bid + tid; int temp = read(A, idx); write(A, idx, temp); }
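// --- Note on the kernel above (the sketch below is illustrative, not part of the test case):
// thread idx reads A[idx + 1] while its neighbour thread idx + 1 -- possibly in a different
// block -- writes that same element, so the read and the write are unsynchronized accesses to one
// location. A common way to express the same data movement without the in-place update is to
// write into a separate output buffer, as in the hypothetical variant below.
#include <cuda.h>

__global__ void shift_no_race(const int* in, int* out, int n)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int idx = blockDim.x * bid + tid;

    if (idx + 1 < n) {
        out[idx] = in[idx + 1];  // reads and writes touch disjoint arrays
    }
}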
c28e1aeae8b51917bc97a56409c6840b15725c2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); // slope_diff is set as 0, then accumulated over batches caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
c28e1aeae8b51917bc97a56409c6840b15725c2a.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); // slope_diff is set as 0, then accumulated over batches caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
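The pair above implements PReLU: f(x) = x for x > 0 and a*x otherwise, with dL/dx = dL/dy * (x > 0 ? 1 : a) and dL/da accumulated from the positions where x <= 0. Below is a minimal host-side reference, assuming a single channel-shared slope; it is a sanity-check sketch, not part of Caffe.

// Host-side reference for the PReLU math above (a sketch, not part of Caffe).
// Assumes a single, channel-shared slope `a`.
#include <cstddef>

// forward: f(x) = x if x > 0, a * x otherwise
void prelu_forward_ref(const float* in, float* out, std::size_t n, float a) {
  for (std::size_t i = 0; i < n; ++i)
    out[i] = in[i] > 0 ? in[i] : a * in[i];
}

// backward: dL/dx = dL/dy * (x > 0 ? 1 : a); dL/da = sum over x <= 0 of dL/dy * x
void prelu_backward_ref(const float* in, const float* top_diff,
                        float* bottom_diff, float* slope_diff,
                        std::size_t n, float a) {
  float da = 0.f;
  for (std::size_t i = 0; i < n; ++i) {
    bottom_diff[i] = top_diff[i] * (in[i] > 0 ? 1.f : a);
    if (in[i] <= 0) da += top_diff[i] * in[i];
  }
  *slope_diff += da;  // accumulated, matching the GPU path that sums over the batch
}

Running the GPU layer and this reference on the same random blob and comparing bottom_diff and slope_diff is a quick way to catch mistakes in the offset arithmetic.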
ee572d8fc927dcfe3c2714f75a5d8f231437ed3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by will on 20-1-9. // #include "CUDATSDFIntegrator.h" // CUDA kernel function to integrate a TSDF voxel volume given depth images and color images __global__ void IntegrateDepthMapKernel(float* d_cam_K, float* d_pose, float* d_depth, uchar4* d_color, float voxel_size, float truncation, int height, int width, int grid_dim_x, int grid_dim_y, int grid_dim_z, float grid_origin_x, float grid_origin_y, float grid_origin_z, Voxel* d_SDFBlocks) { int pt_grid_z = blockIdx.x; int pt_grid_y = threadIdx.x; for(int pt_grid_x = 0; pt_grid_x < grid_dim_x; pt_grid_x++) { // Converter voxel center from grid voxel coordinates to real world coordinates float pt_x = grid_origin_x + pt_grid_x * voxel_size; float pt_y = grid_origin_y + pt_grid_y * voxel_size; float pt_z = grid_origin_z + pt_grid_z * voxel_size; // Converter world coordinates to current camera coordinates float tmp[3] = {0}; tmp[0] = pt_x - d_pose[0 * 4 + 3]; tmp[1] = pt_y - d_pose[1 * 4 + 3]; tmp[2] = pt_z - d_pose[2 * 4 + 3]; float cam_pt_x = d_pose[0 * 4 + 0] * tmp[0] + d_pose[1 * 4 + 0] * tmp[1] + d_pose[2 * 4 + 0] * tmp[2]; float cam_pt_y = d_pose[0 * 4 + 1] * tmp[0] + d_pose[1 * 4 + 1] * tmp[1] + d_pose[2 * 4 + 1] * tmp[2]; float cam_pt_z = d_pose[0 * 4 + 2] * tmp[0] + d_pose[1 * 4 + 2] * tmp[1] + d_pose[2 * 4 + 2] * tmp[2]; if(cam_pt_z <= 0) continue; // d_camK: fx, fy, cx, cy int pt_pix_x = roundf(d_cam_K[0] * (cam_pt_x / cam_pt_z) + d_cam_K[2]); int pt_pix_y = roundf(d_cam_K[1] * (cam_pt_y / cam_pt_z) + d_cam_K[3]); if(pt_pix_x < 0 || pt_pix_x >= width || pt_pix_y < 0 || pt_pix_y >= height) continue; //printf("%d, %d\n", pt_pix_x, pt_pix_y); float depth_val = d_depth[pt_pix_y * width + pt_pix_x]; if(depth_val <= 0 || depth_val > 6) continue; float diff = depth_val - cam_pt_z; if(diff <= -truncation) continue; int volume_idx = pt_grid_z * grid_dim_x * grid_dim_y + pt_grid_y * grid_dim_x + pt_grid_x; // Integrate TSDF float dist = fmin(1.0f, diff / truncation); float weight_old = d_SDFBlocks[volume_idx].weight; float weight_new = weight_old + 1.0f; d_SDFBlocks[volume_idx].weight = weight_new; d_SDFBlocks[volume_idx].sdf = (d_SDFBlocks[volume_idx].sdf * weight_old + dist) / weight_new; // Integrate Color uchar4 RGB = d_color[pt_pix_y * width + pt_pix_x]; float3 cur_color = make_float3(RGB.x, RGB.y, RGB.z); float3 old_color = make_float3(d_SDFBlocks[volume_idx].color.x, d_SDFBlocks[volume_idx].color.y, d_SDFBlocks[volume_idx].color.z); float3 new_color; new_color.x = fmin(roundf((old_color.x * weight_old + cur_color.x)/weight_new), 255.0f); new_color.y = fmin(roundf((old_color.y * weight_old + cur_color.y)/weight_new), 255.0f);; new_color.z = fmin(roundf((old_color.z * weight_old + cur_color.z)/weight_new), 255.0f);; d_SDFBlocks[volume_idx].color = make_uchar4(new_color.x, new_color.y,new_color.z, 255); } } extern "C" void IntegrateDepthMapCUDA(float* d_cam_K, float* d_pose, float* d_depth, uchar4* d_color, float voxel_size, float truncation, int height, int width, int grid_dim, float grid_origin_x, float grid_origin_y, float grid_origin_z, Voxel* d_SDFBlocks) { const dim3 gridSize(grid_dim); const dim3 blockSize(grid_dim); //std::cout << "Launch Kernel..." 
<< std::endl; hipLaunchKernelGGL(( IntegrateDepthMapKernel) , dim3(gridSize), dim3(blockSize) , 0, 0, d_cam_K, d_pose, d_depth, d_color, voxel_size, truncation, height, width, grid_dim, grid_dim, grid_dim, grid_origin_x, grid_origin_y, grid_origin_z, d_SDFBlocks); //hipError_t status = hipGetLastError(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); }
ee572d8fc927dcfe3c2714f75a5d8f231437ed3f.cu
// // Created by will on 20-1-9. // #include "CUDATSDFIntegrator.h" // CUDA kernel function to integrate a TSDF voxel volume given depth images and color images __global__ void IntegrateDepthMapKernel(float* d_cam_K, float* d_pose, float* d_depth, uchar4* d_color, float voxel_size, float truncation, int height, int width, int grid_dim_x, int grid_dim_y, int grid_dim_z, float grid_origin_x, float grid_origin_y, float grid_origin_z, Voxel* d_SDFBlocks) { int pt_grid_z = blockIdx.x; int pt_grid_y = threadIdx.x; for(int pt_grid_x = 0; pt_grid_x < grid_dim_x; pt_grid_x++) { // Converter voxel center from grid voxel coordinates to real world coordinates float pt_x = grid_origin_x + pt_grid_x * voxel_size; float pt_y = grid_origin_y + pt_grid_y * voxel_size; float pt_z = grid_origin_z + pt_grid_z * voxel_size; // Converter world coordinates to current camera coordinates float tmp[3] = {0}; tmp[0] = pt_x - d_pose[0 * 4 + 3]; tmp[1] = pt_y - d_pose[1 * 4 + 3]; tmp[2] = pt_z - d_pose[2 * 4 + 3]; float cam_pt_x = d_pose[0 * 4 + 0] * tmp[0] + d_pose[1 * 4 + 0] * tmp[1] + d_pose[2 * 4 + 0] * tmp[2]; float cam_pt_y = d_pose[0 * 4 + 1] * tmp[0] + d_pose[1 * 4 + 1] * tmp[1] + d_pose[2 * 4 + 1] * tmp[2]; float cam_pt_z = d_pose[0 * 4 + 2] * tmp[0] + d_pose[1 * 4 + 2] * tmp[1] + d_pose[2 * 4 + 2] * tmp[2]; if(cam_pt_z <= 0) continue; // d_camK: fx, fy, cx, cy int pt_pix_x = roundf(d_cam_K[0] * (cam_pt_x / cam_pt_z) + d_cam_K[2]); int pt_pix_y = roundf(d_cam_K[1] * (cam_pt_y / cam_pt_z) + d_cam_K[3]); if(pt_pix_x < 0 || pt_pix_x >= width || pt_pix_y < 0 || pt_pix_y >= height) continue; //printf("%d, %d\n", pt_pix_x, pt_pix_y); float depth_val = d_depth[pt_pix_y * width + pt_pix_x]; if(depth_val <= 0 || depth_val > 6) continue; float diff = depth_val - cam_pt_z; if(diff <= -truncation) continue; int volume_idx = pt_grid_z * grid_dim_x * grid_dim_y + pt_grid_y * grid_dim_x + pt_grid_x; // Integrate TSDF float dist = fmin(1.0f, diff / truncation); float weight_old = d_SDFBlocks[volume_idx].weight; float weight_new = weight_old + 1.0f; d_SDFBlocks[volume_idx].weight = weight_new; d_SDFBlocks[volume_idx].sdf = (d_SDFBlocks[volume_idx].sdf * weight_old + dist) / weight_new; // Integrate Color uchar4 RGB = d_color[pt_pix_y * width + pt_pix_x]; float3 cur_color = make_float3(RGB.x, RGB.y, RGB.z); float3 old_color = make_float3(d_SDFBlocks[volume_idx].color.x, d_SDFBlocks[volume_idx].color.y, d_SDFBlocks[volume_idx].color.z); float3 new_color; new_color.x = fmin(roundf((old_color.x * weight_old + cur_color.x)/weight_new), 255.0f); new_color.y = fmin(roundf((old_color.y * weight_old + cur_color.y)/weight_new), 255.0f);; new_color.z = fmin(roundf((old_color.z * weight_old + cur_color.z)/weight_new), 255.0f);; d_SDFBlocks[volume_idx].color = make_uchar4(new_color.x, new_color.y,new_color.z, 255); } } extern "C" void IntegrateDepthMapCUDA(float* d_cam_K, float* d_pose, float* d_depth, uchar4* d_color, float voxel_size, float truncation, int height, int width, int grid_dim, float grid_origin_x, float grid_origin_y, float grid_origin_z, Voxel* d_SDFBlocks) { const dim3 gridSize(grid_dim); const dim3 blockSize(grid_dim); //std::cout << "Launch Kernel..." << std::endl; IntegrateDepthMapKernel <<< gridSize, blockSize >>> (d_cam_K, d_pose, d_depth, d_color, voxel_size, truncation, height, width, grid_dim, grid_dim, grid_dim, grid_origin_x, grid_origin_y, grid_origin_z, d_SDFBlocks); //cudaError_t status = cudaGetLastError(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); }
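The integration step in IntegrateDepthMapKernel is a running weighted average of truncated signed distances. Isolating it makes the arithmetic easier to verify; VoxelRef below is a stand-in for the project's Voxel struct (a sketch under that assumption, with the color update and depth-validity checks omitted).

// Per-voxel TSDF update used by IntegrateDepthMapKernel, isolated for clarity.
// VoxelRef is a stand-in for the project's own Voxel struct.
#include <math.h>

struct VoxelRef { float sdf; float weight; };

__host__ __device__ inline void integrate_voxel(VoxelRef& v, float depth_val,
                                                float cam_pt_z, float truncation) {
  float diff = depth_val - cam_pt_z;
  if (diff <= -truncation) return;              // too far behind the surface: skip
  float dist = fminf(1.0f, diff / truncation);  // truncated signed distance
  float w_old = v.weight;
  float w_new = w_old + 1.0f;
  v.sdf = (v.sdf * w_old + dist) / w_new;       // running weighted average
  v.weight = w_new;
}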
ff0ce452d2ed4823ad630f291de9644275023c3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "convultion.hpp" #include "cuda_timer.hpp" #include <stdio.h> #include <iostream> #include <fstream> #include <cassert> #include <memory> #include <sstream> #include <cassert> using namespace std; STiming MesureMethodsTimings(istream &input, bool saveOutput = false); int RunSimpleProgram(int argc, char ** argv); int RunMesurings(); int main(int argc, char *argv[]) { int rc = 0; if (argc > 1 && string(argv[1]) == "-t") { rc = RunMesurings(); } else { rc = RunSimpleProgram(argc, argv); } //cin.get(); return rc; } /* * 1. Reads input from input.txt or specified file as first argument * 2. Runs stwo methods for convultion prints result matrxi to outputGlob.txt, outputShared.txt * 3. Prints timings for both methods */ int RunSimpleProgram(int argc, char ** argv) { string filename = argc > 1 ? argv[1] : "input.txt"; fstream input(filename); if (!input.is_open()){ cerr << "File not found: " << filename << endl; return -1; } STiming timing = MesureMethodsTimings(input, true); cout << "Global: " << timing.method1 << endl; cout << "Shared: " << timing.method2; return 0; } /* * Generate input data for different grid and kernel sizes, * mesures times for both methods and prints result */ int RunMesurings() { for (size_t kernelSize = 3; kernelSize <= 9; kernelSize += 2){ for (size_t gridSize = 10; gridSize <= 1000; gridSize *= 10) { stringstream input; generateInput(input, gridSize, kernelSize); STiming timing = MesureMethodsTimings(input); cout << "N: " << gridSize << endl; cout << "M: " << kernelSize << endl; cout << "Global: " << timing.method1 << endl; cout << "Shared: " << timing.method2 << endl; } } return 0; } STiming MesureMethodsTimings(istream &input, bool saveOutput /*= false*/) { STiming timing; fstream outputShared("outputGlobal.txt", fstream::out); fstream outputGlobal("outputShared.txt", fstream::out); size_t gridSizeFromInput; // matrix square side size size_t kernelSize; // convolution kernel matrix square side size input >> gridSizeFromInput >> kernelSize; assert(kernelSize % 2 == 1); size_t kernelRadius; kernelRadius = kernelSize / 2; DataType *h_Grid, *h_Kern; DataType *d_Grid; // d_Kern is in constant memory DataType *d_GridNew; // for checking that 2 different CUDA kernels work equally DataType *h_GridCheck; const size_t CUDA_BLOCK_SIZE = 16; // gridSize ceil-rounded to k * CUDA_BLOCK_SIZE to elluminate extra "if"s inside kernel size_t gridSize = ceilRound(gridSizeFromInput, CUDA_BLOCK_SIZE); // Allocate host & device memory size_t haloGridSize = (gridSize + 2 * kernelRadius); size_t gridDataSize = sizeof(DataType)* haloGridSize * haloGridSize; // with halo rows and columns size_t kernDataSize = sizeof(DataType)* kernelSize * kernelSize; h_Grid = (DataType*)malloc(gridDataSize); h_GridCheck = (DataType*)malloc(gridDataSize); h_Kern = (DataType*)malloc(kernDataSize); hipMalloc(&d_Grid, gridDataSize); hipMalloc(&d_GridNew, gridDataSize); // Read matrix and kernel from input readGridCustomBorders( h_Grid, input, haloGridSize, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); readGrid(h_Kern, input, kernelSize, kernelSize); hipMemcpy(d_Grid, h_Grid, gridDataSize, hipMemcpyHostToDevice); copyKernelToConstantMemory(h_Kern, kernDataSize); // Calculate block/grid dimensions dim3 cudaBlockDim(CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE, 1); // actually gridSize is already multiple of CUDA_BLOCK_SIZE, but... 
size_t cudaGridSize = (size_t)ceil(gridSize / (float)CUDA_BLOCK_SIZE); dim3 cudaGridDim(cudaGridSize, cudaGridSize, 1); /****************************************** * Time measuring for 2 CUDA kernels ******************************************/ CudaTimer timer; int atemps = 10; timer.start(); for (int i = 0; i < atemps; i++) { convultionGlobalMemory << <cudaGridDim, cudaBlockDim >> >( d_GridNew, d_Grid, gridSize, gridSize, kernelSize); } timer.stop(); timing.method1 = timer.getTime() / atemps; hipMemcpy(h_Grid, d_GridNew, gridDataSize, hipMemcpyDeviceToHost); size_t sharedSide = CUDA_BLOCK_SIZE + 2 * kernelRadius; size_t sharedSize = sizeof(DataType)* sharedSide * sharedSide; timer.start(); for (int i = 0; i < atemps; i++) { convultionSharedMemory << <cudaGridDim, cudaBlockDim, sharedSize >> >( d_GridNew, d_Grid, gridSize, gridSize, kernelSize); } timer.stop(); timing.method2 = timer.getTime() / atemps; hipMemcpy(h_GridCheck, d_GridNew, gridDataSize, hipMemcpyDeviceToHost); // Check if both methods generate equal results if (areGridsEqual(h_Grid, h_GridCheck, haloGridSize)){ cout << "Both methods generate equal results." << endl; } else { cout << "Error: methods generate DIFFERENT results." << endl; } // Print result if (saveOutput){ printGridCustomBorders( h_Grid, outputGlobal, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); printGridCustomBorders( h_GridCheck, outputShared, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); } // Release memory free(h_GridCheck); hipFree(d_GridNew); hipFree(d_Grid); free(h_Kern); free(h_Grid); return timing; }
ff0ce452d2ed4823ad630f291de9644275023c3c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "convultion.hpp" #include "cuda_timer.hpp" #include <stdio.h> #include <iostream> #include <fstream> #include <cassert> #include <memory> #include <sstream> #include <cassert> using namespace std; STiming MesureMethodsTimings(istream &input, bool saveOutput = false); int RunSimpleProgram(int argc, char ** argv); int RunMesurings(); int main(int argc, char *argv[]) { int rc = 0; if (argc > 1 && string(argv[1]) == "-t") { rc = RunMesurings(); } else { rc = RunSimpleProgram(argc, argv); } //cin.get(); return rc; } /* * 1. Reads input from input.txt or specified file as first argument * 2. Runs stwo methods for convultion prints result matrxi to outputGlob.txt, outputShared.txt * 3. Prints timings for both methods */ int RunSimpleProgram(int argc, char ** argv) { string filename = argc > 1 ? argv[1] : "input.txt"; fstream input(filename); if (!input.is_open()){ cerr << "File not found: " << filename << endl; return -1; } STiming timing = MesureMethodsTimings(input, true); cout << "Global: " << timing.method1 << endl; cout << "Shared: " << timing.method2; return 0; } /* * Generate input data for different grid and kernel sizes, * mesures times for both methods and prints result */ int RunMesurings() { for (size_t kernelSize = 3; kernelSize <= 9; kernelSize += 2){ for (size_t gridSize = 10; gridSize <= 1000; gridSize *= 10) { stringstream input; generateInput(input, gridSize, kernelSize); STiming timing = MesureMethodsTimings(input); cout << "N: " << gridSize << endl; cout << "M: " << kernelSize << endl; cout << "Global: " << timing.method1 << endl; cout << "Shared: " << timing.method2 << endl; } } return 0; } STiming MesureMethodsTimings(istream &input, bool saveOutput /*= false*/) { STiming timing; fstream outputShared("outputGlobal.txt", fstream::out); fstream outputGlobal("outputShared.txt", fstream::out); size_t gridSizeFromInput; // matrix square side size size_t kernelSize; // convolution kernel matrix square side size input >> gridSizeFromInput >> kernelSize; assert(kernelSize % 2 == 1); size_t kernelRadius; kernelRadius = kernelSize / 2; DataType *h_Grid, *h_Kern; DataType *d_Grid; // d_Kern is in constant memory DataType *d_GridNew; // for checking that 2 different CUDA kernels work equally DataType *h_GridCheck; const size_t CUDA_BLOCK_SIZE = 16; // gridSize ceil-rounded to k * CUDA_BLOCK_SIZE to elluminate extra "if"s inside kernel size_t gridSize = ceilRound(gridSizeFromInput, CUDA_BLOCK_SIZE); // Allocate host & device memory size_t haloGridSize = (gridSize + 2 * kernelRadius); size_t gridDataSize = sizeof(DataType)* haloGridSize * haloGridSize; // with halo rows and columns size_t kernDataSize = sizeof(DataType)* kernelSize * kernelSize; h_Grid = (DataType*)malloc(gridDataSize); h_GridCheck = (DataType*)malloc(gridDataSize); h_Kern = (DataType*)malloc(kernDataSize); cudaMalloc(&d_Grid, gridDataSize); cudaMalloc(&d_GridNew, gridDataSize); // Read matrix and kernel from input readGridCustomBorders( h_Grid, input, haloGridSize, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); readGrid(h_Kern, input, kernelSize, kernelSize); cudaMemcpy(d_Grid, h_Grid, gridDataSize, cudaMemcpyHostToDevice); copyKernelToConstantMemory(h_Kern, kernDataSize); // Calculate block/grid dimensions dim3 cudaBlockDim(CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE, 1); // actually gridSize is already multiple of CUDA_BLOCK_SIZE, but... 
size_t cudaGridSize = (size_t)ceil(gridSize / (float)CUDA_BLOCK_SIZE); dim3 cudaGridDim(cudaGridSize, cudaGridSize, 1); /****************************************** * Time measuring for 2 CUDA kernels ******************************************/ CudaTimer timer; int atemps = 10; timer.start(); for (int i = 0; i < atemps; i++) { convultionGlobalMemory << <cudaGridDim, cudaBlockDim >> >( d_GridNew, d_Grid, gridSize, gridSize, kernelSize); } timer.stop(); timing.method1 = timer.getTime() / atemps; cudaMemcpy(h_Grid, d_GridNew, gridDataSize, cudaMemcpyDeviceToHost); size_t sharedSide = CUDA_BLOCK_SIZE + 2 * kernelRadius; size_t sharedSize = sizeof(DataType)* sharedSide * sharedSide; timer.start(); for (int i = 0; i < atemps; i++) { convultionSharedMemory << <cudaGridDim, cudaBlockDim, sharedSize >> >( d_GridNew, d_Grid, gridSize, gridSize, kernelSize); } timer.stop(); timing.method2 = timer.getTime() / atemps; cudaMemcpy(h_GridCheck, d_GridNew, gridDataSize, cudaMemcpyDeviceToHost); // Check if both methods generate equal results if (areGridsEqual(h_Grid, h_GridCheck, haloGridSize)){ cout << "Both methods generate equal results." << endl; } else { cout << "Error: methods generate DIFFERENT results." << endl; } // Print result if (saveOutput){ printGridCustomBorders( h_Grid, outputGlobal, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); printGridCustomBorders( h_GridCheck, outputShared, haloGridSize, kernelRadius, kernelRadius + gridSizeFromInput, kernelRadius, kernelRadius + gridSizeFromInput); } // Release memory free(h_GridCheck); cudaFree(d_GridNew); cudaFree(d_Grid); free(h_Kern); free(h_Grid); return timing; }
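Both versions pad gridSize up to a multiple of CUDA_BLOCK_SIZE so the kernels can skip per-thread bounds checks. ceilRound itself lives in convultion.hpp and is not shown here; the sketch below spells out the usual rounding pattern under the assumption that this is what it does (ceil_round is a hypothetical stand-in, not the project's function).

// Rounding a size up to the next multiple of the block size (assumed behaviour
// of ceilRound; ceil_round here is only an illustration).
#include <cstddef>

inline std::size_t ceil_round(std::size_t n, std::size_t m) {
  return ((n + m - 1) / m) * m;  // smallest multiple of m that is >= n
}

// With gridSize a multiple of CUDA_BLOCK_SIZE, each 16x16 block maps to a full
// tile, and the grid dimension gridSize / CUDA_BLOCK_SIZE needs no extra "+1".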
cd16e5dd0c22799b800444c368e4de2e6cb531a3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <assert.h> #include <string.h> #include <stdio.h> #include <limits.h> #include <ctype.h> //#include <cutil.h> removed in CUDA 5.0 #include <math.h> #include "rocblas.h" #include "hip/hip_runtime.h" #include "../include/cuSVMutil.h" /*This mixed-precision matrix-vector multiplication algorithm is based on hipblasSgemv NVIDIA's CUBLAS 1.1. In his tests, the author has found catastrophic prediction errors resulting from using only single precision floating point arithmetic for the multiplication of the predictive kernel matrix by the SVM coefficients; however, all of the errors he found disappeared when he switched to a mixed-precision approach where the scalar dot-product accumulator is a double precision number. Thus, the use of full double precision arithmetic, which would involve significant performance penalties, does not seem necessary. CUBLAS 1.1 source code is available at: http://forums.nvidia.com/index.php?showtopic=59101, and CUBLAS is available at http://www.nvidia.com/cuda .*/ #define LOG_THREAD_COUNT (7) #define THREAD_COUNT (1 << LOG_THREAD_COUNT) #define CTAS (64) #define IDXA(row,col) (lda*(col)+(row)) #define IDXX(i) (startx + ((i) * incx)) #define IDXY(i) (starty + ((i) * incy)) #define TILEW_LOG (5) #define TILEW (1 << TILEW_LOG) #define TILEH_LOG (5) #define TILEH (1 << TILEH_LOG) #define X_ELEMS_PER_THREAD (4) #define IINC (CTAS * THREAD_COUNT) #define JINC (THREAD_COUNT * X_ELEMS_PER_THREAD) #define XINC (THREAD_COUNT) __shared__ float XX[TILEH]; __shared__ float AA[(TILEH+1)*TILEW]; __global__ void sgemvn_mixedprecis(const float *A, const float *x,float *y, int m, int n, int lda, int incx, int incy) { __shared__ float XX[JINC]; int i, ii, j, jj, idx, incr, tid; double sdot; int startx; int starty; tid = threadIdx.x; startx = (incx >= 0) ? 0 : ((1 - n) * incx); starty = (incy >= 0) ? 0 : ((1 - m) * incy); for (i = 0; i < m; i += IINC) { ii = i + blockIdx.x * THREAD_COUNT; if (ii >= m) break; ii += tid; sdot = 0.0f; for (j = 0; j < n; j += JINC) { int jjLimit = min (j + JINC, n); incr = XINC * incx; jj = j + tid; __syncthreads (); idx = IDXX(jj); if (jj < (jjLimit - 3 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; XX[tid+2*XINC] = x[idx + 2 * incr]; XX[tid+3*XINC] = x[idx + 3 * incr]; } else if (jj < (jjLimit - 2 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; XX[tid+2*XINC] = x[idx + 2 * incr]; } else if (jj < (jjLimit - 1 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; } else if (jj < (jjLimit - 0 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; } __syncthreads (); if (ii < m) { /* if this row is active, accumulate dp */ idx = IDXA(ii, j); incr = lda; jjLimit = jjLimit - j; jj = 0; while (jj < (jjLimit - 5)) { sdot += A[idx + 0*incr] * XX[jj+ 0]; sdot += A[idx + 1*incr] * XX[jj+ 1]; sdot += A[idx + 2*incr] * XX[jj+ 2]; sdot += A[idx + 3*incr] * XX[jj+ 3]; sdot += A[idx + 4*incr] * XX[jj+ 4]; sdot += A[idx + 5*incr] * XX[jj+ 5]; jj += 6; idx += 6 * incr; } while (jj < jjLimit) { sdot += A[idx + 0*incr] * XX[jj+ 0]; jj += 1; idx += 1 * incr; } } } if (ii < m) { idx = IDXY(ii); y[idx] = sdot; } } } //The memory access pattern and structure of this code is derived from Vasily Volkov's highly // optimized matrix-matrix multiply CUDA code. 
//His website is http://www.cs.berkeley.edu/~volkov/ __global__ void RBFKernelForPredict( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float kernelwidth ) { int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x * 32; int iby = blockIdx.y * 32; A += ibx + inx + __mul24( iny, lda ); B += iby + inx + __mul24( iny, ldb ); C += ibx + inx + __mul24( iby + iny, ldc ); float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; for( int i = 0; i < k; i += 4 ) { __shared__ float a[4][32]; __shared__ float b[4][32]; a[iny][inx] = A[i*lda]; a[iny+2][inx] = A[(i+2)*lda]; b[iny][inx] = B[i*ldb]; b[iny+2][inx] = B[(i+2)*ldb]; __syncthreads(); for( int j = 0; j < 4; j++ ) { float _a = a[j][inx]; float *_b = &b[j][0] + iny; float _asquared=_a*_a;; //The (negative here) squared distance between datapoints is necessary for the calculation of the RBF Kernel. //This code uses the identity -(x-y)^2=2*x*y-x^2-y^2. c[0] += 2.f*_a*_b[0]-_asquared-_b[0]*_b[0]; c[1] += 2.f*_a*_b[2]-_asquared-_b[2]*_b[2]; c[2] += 2.f*_a*_b[4]-_asquared-_b[4]*_b[4]; c[3] += 2.f*_a*_b[6]-_asquared-_b[6]*_b[6]; c[4] += 2.f*_a*_b[8]-_asquared-_b[8]*_b[8]; c[5] += 2.f*_a*_b[10]-_asquared-_b[10]*_b[10]; c[6] += 2.f*_a*_b[12]-_asquared-_b[12]*_b[12]; c[7] += 2.f*_a*_b[14]-_asquared-_b[14]*_b[14]; c[8] += 2.f*_a*_b[16]-_asquared-_b[16]*_b[16]; c[9] += 2.f*_a*_b[18]-_asquared-_b[18]*_b[18]; c[10] += 2.f*_a*_b[20]-_asquared-_b[20]*_b[20]; c[11] += 2.f*_a*_b[22]-_asquared-_b[22]*_b[22]; c[12] += 2.f*_a*_b[24]-_asquared-_b[24]*_b[24]; c[13] += 2.f*_a*_b[26]-_asquared-_b[26]*_b[26]; c[14] += 2.f*_a*_b[28]-_asquared-_b[28]*_b[28]; c[15] += 2.f*_a*_b[30]-_asquared-_b[30]*_b[30]; } __syncthreads(); } for( int i = 0; i < 16; i++, C += 2*ldc ) // Here the negative squared distances between datapoints, calculated above, are multiplied by the kernel width parameter and exponentiated. 
C[0] = exp(kernelwidth*c[i]); } extern "C" void GPUPredictWrapper(int m, int n, int k, float kernelwidth, const float *Test, const float *Svs, float * alphas,float *prediction, float beta,float isregression) { // for now this is not important to us // mxArray *mexelapsed = mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL); // float * elapsed=(float *)mxGetData(mexelapsed); // start things hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); // int paddedm=m+32-m%32; int paddedk=k+32-k%32; int paddedn=n+32-n%32; float* d_PaddedSvs; CUDA_SAFE_CALL( hipMalloc( (void**) &d_PaddedSvs, paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( hipMemset(d_PaddedSvs,0.f,paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( hipMemcpy(d_PaddedSvs, Svs, sizeof(float)*n*k,hipMemcpyHostToDevice)); float* d_PaddedSvsT; CUDA_SAFE_CALL( hipMalloc( (void**) &d_PaddedSvsT, paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( hipMemset(d_PaddedSvsT,0.f,paddedn*paddedk*sizeof(float))); dim3 gridtranspose(ceil((double)n / TRANS_BLOCK_DIM), ceil((double)paddedk / TRANS_BLOCK_DIM), 1); dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1); hipLaunchKernelGGL(( transpose), dim3(gridtranspose), dim3(threadstranspose) , 0, 0, d_PaddedSvsT, d_PaddedSvs, n,paddedk); dim3 gridtranspose2(ceil((double)paddedk / TRANS_BLOCK_DIM), ceil((double)paddedn / TRANS_BLOCK_DIM), 1); hipLaunchKernelGGL(( transpose), dim3(gridtranspose2), dim3(threadstranspose) , 0, 0, d_PaddedSvs, d_PaddedSvsT, paddedk,paddedn); CUDA_SAFE_CALL( hipFree(d_PaddedSvsT)); double DoubleNecIterations=(double)paddedm/CUBIC_ROOT_MAX_OPS; DoubleNecIterations*=(double)paddedn/CUBIC_ROOT_MAX_OPS; DoubleNecIterations*=(double)paddedk/CUBIC_ROOT_MAX_OPS; int NecIterations=ceil(DoubleNecIterations); int RowsPerIter=ceil((double)paddedm/NecIterations)+32-int(ceil((double)paddedm/NecIterations))%32; NecIterations=ceil((double)paddedm/RowsPerIter); dim3 grid( RowsPerIter/32, paddedn/32, 1 ); dim3 threads2( 32, 2, 1 ); float * d_TestInter; float * d_QInter; CUDA_SAFE_CALL( hipMalloc( (void**) &d_TestInter, RowsPerIter*paddedk*sizeof(float))); CUDA_SAFE_CALL(hipMemset(d_TestInter,0.f,RowsPerIter*paddedk*sizeof(float))); CUDA_SAFE_CALL( hipMalloc( (void**) &d_QInter, RowsPerIter*paddedn*sizeof(float))); float * d_alphas; float * d_prediction; CUDA_SAFE_CALL( hipMalloc( (void**) &d_alphas, n*sizeof(float))); hipblasSetVector(n,sizeof(float),alphas,1,d_alphas,1); CUDA_SAFE_CALL( hipMalloc( (void**) &d_prediction, NecIterations*RowsPerIter*sizeof(float))); for (int j=0;j<NecIterations;j++) { if (j+1==NecIterations) { hipblasSetMatrix(m-j*RowsPerIter,k,sizeof(float),Test+j*RowsPerIter,m,d_TestInter,RowsPerIter); } else { hipblasSetMatrix(RowsPerIter,k,sizeof(float),Test+j*RowsPerIter,m,d_TestInter,RowsPerIter); } hipLaunchKernelGGL(( RBFKernelForPredict), dim3(grid), dim3(threads2), 0, 0, d_TestInter, RowsPerIter, d_PaddedSvs, paddedn, d_QInter, RowsPerIter, paddedk, kernelwidth); hipLaunchKernelGGL(( sgemvn_mixedprecis), dim3(CTAS),dim3(THREAD_COUNT), 0, 0, d_QInter,d_alphas,d_prediction+j*RowsPerIter,RowsPerIter,n,RowsPerIter,1,1); } hipblasGetVector(m,sizeof(float),d_prediction,1,prediction,1); for(int j=0;j<m;j++) { prediction[j]+=beta; if (isregression!=1){prediction[j]=prediction[j]<0?-1.0:1.0;} } hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); // hipEventElapsedTime(elapsed, start, stop); // mexPutVariable("base","cuSVMPredictTimeInMS",mexelapsed); CUDA_SAFE_CALL( hipFree(d_alphas)); CUDA_SAFE_CALL( 
hipFree(d_TestInter)); CUDA_SAFE_CALL( hipFree(d_QInter)); CUDA_SAFE_CALL( hipFree(d_PaddedSvs)); CUDA_SAFE_CALL(hipFree(d_prediction)); CUDA_SAFE_CALL(hipDeviceReset()); }
cd16e5dd0c22799b800444c368e4de2e6cb531a3.cu
#include <stdlib.h> #include <assert.h> #include <string.h> #include <stdio.h> #include <limits.h> #include <ctype.h> //#include <cutil.h> removed in CUDA 5.0 #include <math.h> #include "cublas.h" #include "cuda.h" #include "../include/cuSVMutil.h" /*This mixed-precision matrix-vector multiplication algorithm is based on cublasSgemv NVIDIA's CUBLAS 1.1. In his tests, the author has found catastrophic prediction errors resulting from using only single precision floating point arithmetic for the multiplication of the predictive kernel matrix by the SVM coefficients; however, all of the errors he found disappeared when he switched to a mixed-precision approach where the scalar dot-product accumulator is a double precision number. Thus, the use of full double precision arithmetic, which would involve significant performance penalties, does not seem necessary. CUBLAS 1.1 source code is available at: http://forums.nvidia.com/index.php?showtopic=59101, and CUBLAS is available at http://www.nvidia.com/cuda .*/ #define LOG_THREAD_COUNT (7) #define THREAD_COUNT (1 << LOG_THREAD_COUNT) #define CTAS (64) #define IDXA(row,col) (lda*(col)+(row)) #define IDXX(i) (startx + ((i) * incx)) #define IDXY(i) (starty + ((i) * incy)) #define TILEW_LOG (5) #define TILEW (1 << TILEW_LOG) #define TILEH_LOG (5) #define TILEH (1 << TILEH_LOG) #define X_ELEMS_PER_THREAD (4) #define IINC (CTAS * THREAD_COUNT) #define JINC (THREAD_COUNT * X_ELEMS_PER_THREAD) #define XINC (THREAD_COUNT) __shared__ float XX[TILEH]; __shared__ float AA[(TILEH+1)*TILEW]; __global__ void sgemvn_mixedprecis(const float *A, const float *x,float *y, int m, int n, int lda, int incx, int incy) { __shared__ float XX[JINC]; int i, ii, j, jj, idx, incr, tid; double sdot; int startx; int starty; tid = threadIdx.x; startx = (incx >= 0) ? 0 : ((1 - n) * incx); starty = (incy >= 0) ? 0 : ((1 - m) * incy); for (i = 0; i < m; i += IINC) { ii = i + blockIdx.x * THREAD_COUNT; if (ii >= m) break; ii += tid; sdot = 0.0f; for (j = 0; j < n; j += JINC) { int jjLimit = min (j + JINC, n); incr = XINC * incx; jj = j + tid; __syncthreads (); idx = IDXX(jj); if (jj < (jjLimit - 3 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; XX[tid+2*XINC] = x[idx + 2 * incr]; XX[tid+3*XINC] = x[idx + 3 * incr]; } else if (jj < (jjLimit - 2 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; XX[tid+2*XINC] = x[idx + 2 * incr]; } else if (jj < (jjLimit - 1 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; XX[tid+1*XINC] = x[idx + 1 * incr]; } else if (jj < (jjLimit - 0 * XINC)) { XX[tid+0*XINC] = x[idx + 0 * incr]; } __syncthreads (); if (ii < m) { /* if this row is active, accumulate dp */ idx = IDXA(ii, j); incr = lda; jjLimit = jjLimit - j; jj = 0; while (jj < (jjLimit - 5)) { sdot += A[idx + 0*incr] * XX[jj+ 0]; sdot += A[idx + 1*incr] * XX[jj+ 1]; sdot += A[idx + 2*incr] * XX[jj+ 2]; sdot += A[idx + 3*incr] * XX[jj+ 3]; sdot += A[idx + 4*incr] * XX[jj+ 4]; sdot += A[idx + 5*incr] * XX[jj+ 5]; jj += 6; idx += 6 * incr; } while (jj < jjLimit) { sdot += A[idx + 0*incr] * XX[jj+ 0]; jj += 1; idx += 1 * incr; } } } if (ii < m) { idx = IDXY(ii); y[idx] = sdot; } } } //The memory access pattern and structure of this code is derived from Vasily Volkov's highly // optimized matrix-matrix multiply CUDA code. 
//His website is http://www.cs.berkeley.edu/~volkov/ __global__ void RBFKernelForPredict( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float kernelwidth ) { int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x * 32; int iby = blockIdx.y * 32; A += ibx + inx + __mul24( iny, lda ); B += iby + inx + __mul24( iny, ldb ); C += ibx + inx + __mul24( iby + iny, ldc ); float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; for( int i = 0; i < k; i += 4 ) { __shared__ float a[4][32]; __shared__ float b[4][32]; a[iny][inx] = A[i*lda]; a[iny+2][inx] = A[(i+2)*lda]; b[iny][inx] = B[i*ldb]; b[iny+2][inx] = B[(i+2)*ldb]; __syncthreads(); for( int j = 0; j < 4; j++ ) { float _a = a[j][inx]; float *_b = &b[j][0] + iny; float _asquared=_a*_a;; //The (negative here) squared distance between datapoints is necessary for the calculation of the RBF Kernel. //This code uses the identity -(x-y)^2=2*x*y-x^2-y^2. c[0] += 2.f*_a*_b[0]-_asquared-_b[0]*_b[0]; c[1] += 2.f*_a*_b[2]-_asquared-_b[2]*_b[2]; c[2] += 2.f*_a*_b[4]-_asquared-_b[4]*_b[4]; c[3] += 2.f*_a*_b[6]-_asquared-_b[6]*_b[6]; c[4] += 2.f*_a*_b[8]-_asquared-_b[8]*_b[8]; c[5] += 2.f*_a*_b[10]-_asquared-_b[10]*_b[10]; c[6] += 2.f*_a*_b[12]-_asquared-_b[12]*_b[12]; c[7] += 2.f*_a*_b[14]-_asquared-_b[14]*_b[14]; c[8] += 2.f*_a*_b[16]-_asquared-_b[16]*_b[16]; c[9] += 2.f*_a*_b[18]-_asquared-_b[18]*_b[18]; c[10] += 2.f*_a*_b[20]-_asquared-_b[20]*_b[20]; c[11] += 2.f*_a*_b[22]-_asquared-_b[22]*_b[22]; c[12] += 2.f*_a*_b[24]-_asquared-_b[24]*_b[24]; c[13] += 2.f*_a*_b[26]-_asquared-_b[26]*_b[26]; c[14] += 2.f*_a*_b[28]-_asquared-_b[28]*_b[28]; c[15] += 2.f*_a*_b[30]-_asquared-_b[30]*_b[30]; } __syncthreads(); } for( int i = 0; i < 16; i++, C += 2*ldc ) // Here the negative squared distances between datapoints, calculated above, are multiplied by the kernel width parameter and exponentiated. 
C[0] = exp(kernelwidth*c[i]); } extern "C" void GPUPredictWrapper(int m, int n, int k, float kernelwidth, const float *Test, const float *Svs, float * alphas,float *prediction, float beta,float isregression) { // for now this is not important to us // mxArray *mexelapsed = mxCreateNumericMatrix(1, 1,mxSINGLE_CLASS, mxREAL); // float * elapsed=(float *)mxGetData(mexelapsed); // start things cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); // int paddedm=m+32-m%32; int paddedk=k+32-k%32; int paddedn=n+32-n%32; float* d_PaddedSvs; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_PaddedSvs, paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( cudaMemset(d_PaddedSvs,0.f,paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( cudaMemcpy(d_PaddedSvs, Svs, sizeof(float)*n*k,cudaMemcpyHostToDevice)); float* d_PaddedSvsT; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_PaddedSvsT, paddedn*paddedk*sizeof(float))); CUDA_SAFE_CALL( cudaMemset(d_PaddedSvsT,0.f,paddedn*paddedk*sizeof(float))); dim3 gridtranspose(ceil((double)n / TRANS_BLOCK_DIM), ceil((double)paddedk / TRANS_BLOCK_DIM), 1); dim3 threadstranspose(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM, 1); transpose<<< gridtranspose, threadstranspose >>>(d_PaddedSvsT, d_PaddedSvs, n,paddedk); dim3 gridtranspose2(ceil((double)paddedk / TRANS_BLOCK_DIM), ceil((double)paddedn / TRANS_BLOCK_DIM), 1); transpose<<< gridtranspose2, threadstranspose >>>(d_PaddedSvs, d_PaddedSvsT, paddedk,paddedn); CUDA_SAFE_CALL( cudaFree(d_PaddedSvsT)); double DoubleNecIterations=(double)paddedm/CUBIC_ROOT_MAX_OPS; DoubleNecIterations*=(double)paddedn/CUBIC_ROOT_MAX_OPS; DoubleNecIterations*=(double)paddedk/CUBIC_ROOT_MAX_OPS; int NecIterations=ceil(DoubleNecIterations); int RowsPerIter=ceil((double)paddedm/NecIterations)+32-int(ceil((double)paddedm/NecIterations))%32; NecIterations=ceil((double)paddedm/RowsPerIter); dim3 grid( RowsPerIter/32, paddedn/32, 1 ); dim3 threads2( 32, 2, 1 ); float * d_TestInter; float * d_QInter; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_TestInter, RowsPerIter*paddedk*sizeof(float))); CUDA_SAFE_CALL(cudaMemset(d_TestInter,0.f,RowsPerIter*paddedk*sizeof(float))); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_QInter, RowsPerIter*paddedn*sizeof(float))); float * d_alphas; float * d_prediction; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_alphas, n*sizeof(float))); cublasSetVector(n,sizeof(float),alphas,1,d_alphas,1); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_prediction, NecIterations*RowsPerIter*sizeof(float))); for (int j=0;j<NecIterations;j++) { if (j+1==NecIterations) { cublasSetMatrix(m-j*RowsPerIter,k,sizeof(float),Test+j*RowsPerIter,m,d_TestInter,RowsPerIter); } else { cublasSetMatrix(RowsPerIter,k,sizeof(float),Test+j*RowsPerIter,m,d_TestInter,RowsPerIter); } RBFKernelForPredict<<<grid, threads2>>>(d_TestInter, RowsPerIter, d_PaddedSvs, paddedn, d_QInter, RowsPerIter, paddedk, kernelwidth); sgemvn_mixedprecis<<<CTAS,THREAD_COUNT>>>(d_QInter,d_alphas,d_prediction+j*RowsPerIter,RowsPerIter,n,RowsPerIter,1,1); } cublasGetVector(m,sizeof(float),d_prediction,1,prediction,1); for(int j=0;j<m;j++) { prediction[j]+=beta; if (isregression!=1){prediction[j]=prediction[j]<0?-1.0:1.0;} } cudaThreadSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); // cudaEventElapsedTime(elapsed, start, stop); // mexPutVariable("base","cuSVMPredictTimeInMS",mexelapsed); CUDA_SAFE_CALL( cudaFree(d_alphas)); CUDA_SAFE_CALL( cudaFree(d_TestInter)); CUDA_SAFE_CALL( cudaFree(d_QInter)); CUDA_SAFE_CALL( cudaFree(d_PaddedSvs)); 
CUDA_SAFE_CALL(cudaFree(d_prediction)); CUDA_SAFE_CALL(cudaThreadExit()); }
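RBFKernelForPredict accumulates c = sum_d (2*a_d*b_d - a_d^2 - b_d^2) = -(squared distance) per point pair and exponentiates only once at the end, while sgemvn_mixedprecis keeps its dot-product accumulator in double. A scalar sketch of the distance identity follows; it is not cuSVM API, and the caller's sign convention for kernelwidth is not shown in this file.

// Scalar illustration of the identity used in RBFKernelForPredict:
// -(x - y)^2 == 2*x*y - x^2 - y^2, so the kernel can build the negative
// squared distance from products and squared norms alone. With c carrying the
// negative sign, exp(kernelwidth * c) evaluates to exp(-kernelwidth * (x - y)^2)
// for a positive kernelwidth (sketch only, not the cuSVM interface).
#include <cmath>

inline float neg_sq_dist_term(float x, float y) {
  return 2.f * x * y - x * x - y * y;   // == -(x - y) * (x - y)
}

inline float rbf_scalar(float x, float y, float kernelwidth) {
  return std::exp(kernelwidth * neg_sq_dist_term(x, y));
}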
e124b42a8b2223c542319f127fe1d62da4de95d8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * \file dnn/src/cuda/batched_matrix_mul/helper.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "src/cuda/batched_matrix_mul/helper.cuh"

namespace {
template <typename T>
__global__ void kernel(T *Xs, T start, uint32_t step, uint32_t n) {
    uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        Xs[i] = start + i*step;
    }
}
} // anonymous namespace

namespace megdnn {
namespace cuda {
namespace batched_matrix_mul {

template <typename T>
void arange(T *Xs, T start, uint32_t step, uint32_t n, hipStream_t stream) {
    uint32_t threads = NR_THREADS;
    uint32_t blocks = DIVUP(n, threads);
    hipLaunchKernelGGL(( kernel<T>), dim3(blocks), dim3(threads), 0, stream, Xs, start, step, n);
    after_kernel_launch();
}

template void arange<uintptr_t>(uintptr_t *, uintptr_t, uint32_t, uint32_t, hipStream_t);

} // namespace batched_matrix_mul
} // namespace cuda
} // namespace megdnn

// vim: syntax=cpp.doxygen
e124b42a8b2223c542319f127fe1d62da4de95d8.cu
/**
 * \file dnn/src/cuda/batched_matrix_mul/helper.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "src/cuda/batched_matrix_mul/helper.cuh"

namespace {
template <typename T>
__global__ void kernel(T *Xs, T start, uint32_t step, uint32_t n) {
    uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        Xs[i] = start + i*step;
    }
}
} // anonymous namespace

namespace megdnn {
namespace cuda {
namespace batched_matrix_mul {

template <typename T>
void arange(T *Xs, T start, uint32_t step, uint32_t n, cudaStream_t stream) {
    uint32_t threads = NR_THREADS;
    uint32_t blocks = DIVUP(n, threads);
    kernel<T><<<blocks, threads, 0, stream>>>(Xs, start, step, n);
    after_kernel_launch();
}

template void arange<uintptr_t>(uintptr_t *, uintptr_t, uint32_t, uint32_t, cudaStream_t);

} // namespace batched_matrix_mul
} // namespace cuda
} // namespace megdnn

// vim: syntax=cpp.doxygen
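A host-side usage sketch for an arange-style fill like the kernel above; illustrative only, since MegEngine drives the real kernel through its own stream and its NR_THREADS and DIVUP helpers.

// Standalone usage sketch (hypothetical driver, not MegEngine code).
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

__global__ void arange_kernel(uint32_t* xs, uint32_t start, uint32_t step, uint32_t n) {
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) xs[i] = start + i * step;
}

int main() {
  const uint32_t n = 1024, threads = 256;
  uint32_t* d_xs = nullptr;
  cudaMalloc(&d_xs, n * sizeof(uint32_t));
  arange_kernel<<<(n + threads - 1) / threads, threads>>>(d_xs, /*start=*/0, /*step=*/4, n);
  cudaDeviceSynchronize();
  uint32_t last = 0;
  cudaMemcpy(&last, d_xs + n - 1, sizeof(uint32_t), cudaMemcpyDeviceToHost);
  printf("last element: %u\n", last);  // expect (n - 1) * 4
  cudaFree(d_xs);
  return 0;
}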
e400d883bf200c2495a0fa5029d20b2f1ed3464c.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iostream> #include <cstdlib> #include <hip/hip_runtime.h> #include "kernels.h" // thread block size #define BLOCK_SIZE 256 template <typename T> inline void memcpyH2D(T *d, const T *h, const int n) { hipMemcpy(d, h, n * sizeof(T), hipMemcpyHostToDevice); } template <typename T> void test (const int repeat, const int numFloats) { // Initialize host data, with the first half the same as the second T *hostMem = (T*) malloc (sizeof(T) * numFloats); srand48(123); for (int j = 0; j < numFloats/2 ; ++j) hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); T *deviceMem; hipMalloc((void**)&deviceMem, numFloats * sizeof(T)); dim3 threads(BLOCK_SIZE); dim3 blocks((numFloats)/BLOCK_SIZE); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); hipLaunchKernelGGL(( Add1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipLaunchKernelGGL(( Add2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipLaunchKernelGGL(( Add4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipLaunchKernelGGL(( Add8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); auto k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Add1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipDeviceSynchronize(); auto k_end = std::chrono::high_resolution_clock::now(); auto k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add1): %f (s)\n", (k_time * 1e-9f)); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Add2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Add4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Add8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); hipLaunchKernelGGL(( Mul1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipLaunchKernelGGL(( Mul2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipLaunchKernelGGL(( Mul4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipLaunchKernelGGL(( Mul8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Mul1<T>), 
dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Mul2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Mul4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( Mul8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 1.01); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); hipLaunchKernelGGL(( MAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipLaunchKernelGGL(( MAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipLaunchKernelGGL(( MAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipLaunchKernelGGL(( MAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 10.0, 0.9899); 
hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); hipLaunchKernelGGL(( MulMAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipLaunchKernelGGL(( MulMAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipLaunchKernelGGL(( MulMAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipLaunchKernelGGL(( MulMAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MulMAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MulMAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MulMAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( MulMAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, deviceMem, repeat, 3.75, 0.355); hipDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd8): %f (s)\n", k_time * 1e-9f); hipFree(deviceMem); free(hostMem); } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } // the number of loop iterations inside kernels const int repeat = atoi(argv[1]); // a multiple of BLOCK_SIZE const int numFloats = 2*1024*1024; printf("=== Single-precision floating-point kernels ===\n"); test<float>(repeat, numFloats); // comment out when double-precision is not supported by a device printf("=== Double-precision floating-point kernels ===\n"); test<double>(repeat, numFloats); return 0; }
e400d883bf200c2495a0fa5029d20b2f1ed3464c.cu
#include <chrono> #include <iostream> #include <cstdlib> #include <cuda.h> #include "kernels.h" // thread block size #define BLOCK_SIZE 256 template <typename T> inline void memcpyH2D(T *d, const T *h, const int n) { cudaMemcpy(d, h, n * sizeof(T), cudaMemcpyHostToDevice); } template <typename T> void test (const int repeat, const int numFloats) { // Initialize host data, with the first half the same as the second T *hostMem = (T*) malloc (sizeof(T) * numFloats); srand48(123); for (int j = 0; j < numFloats/2 ; ++j) hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); T *deviceMem; cudaMalloc((void**)&deviceMem, numFloats * sizeof(T)); dim3 threads(BLOCK_SIZE); dim3 blocks((numFloats)/BLOCK_SIZE); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); Add1<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); Add2<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); Add4<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); Add8<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); cudaDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); auto k_start = std::chrono::high_resolution_clock::now(); Add1<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); cudaDeviceSynchronize(); auto k_end = std::chrono::high_resolution_clock::now(); auto k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add1): %f (s)\n", (k_time * 1e-9f)); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Add2<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Add4<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Add8<T><<< blocks, threads >>>(deviceMem, repeat, 10.0); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Add8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); Mul1<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); Mul2<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); Mul4<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); Mul8<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); cudaDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Mul1<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Mul2<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = 
std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Mul4<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); Mul8<T><<< blocks, threads >>>(deviceMem, repeat, 1.01); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (Mul8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); MAdd1<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); MAdd2<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); MAdd4<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); MAdd8<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); cudaDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MAdd1<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MAdd2<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MAdd4<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MAdd8<T><<< blocks, threads >>>(deviceMem, repeat, 10.0, 0.9899); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MAdd8): %f (s)\n", k_time * 1e-9f); // warmup for (int i = 0; i < 4; i++) { memcpyH2D(deviceMem, hostMem, numFloats); MulMAdd1<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); MulMAdd2<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); MulMAdd4<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); MulMAdd8<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); cudaDeviceSynchronize(); } memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MulMAdd1<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - 
k_start).count(); printf("kernel execution time (MulMAdd1): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MulMAdd2<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd2): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MulMAdd4<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd4): %f (s)\n", k_time * 1e-9f); memcpyH2D(deviceMem, hostMem, numFloats); k_start = std::chrono::high_resolution_clock::now(); MulMAdd8<T><<< blocks, threads >>>(deviceMem, repeat, 3.75, 0.355); cudaDeviceSynchronize(); k_end = std::chrono::high_resolution_clock::now(); k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count(); printf("kernel execution time (MulMAdd8): %f (s)\n", k_time * 1e-9f); cudaFree(deviceMem); free(hostMem); } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } // the number of loop iterations inside kernels const int repeat = atoi(argv[1]); // a multiple of BLOCK_SIZE const int numFloats = 2*1024*1024; printf("=== Single-precision floating-point kernels ===\n"); test<float>(repeat, numFloats); // comment out when double-precision is not supported by a device printf("=== Double-precision floating-point kernels ===\n"); test<double>(repeat, numFloats); return 0; }
b114ed3aabd91d5c377fc078ca9b00e283e5f59f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define MAX_KERNEL_SIZE_X 10 #define MAX_KERNEL_SIZE_Y 10 #define BLOCK_SIZE 16 #define SHARED_STUFF_SIZE_X (BLOCK_SIZE+MAX_KERNEL_SIZE_X * 2) #define SHARED_STUFF_SIZE_Y (BLOCK_SIZE+MAX_KERNEL_SIZE_Y * 2) #define SHARED_STUFF_SIZE (SHARED_STUFF_SIZE_X*SHARED_STUFF_SIZE_Y) #define KERNEL_SIZE 2 __device__ void swap(unsigned char* x1, unsigned char* x2) { unsigned char tmp = *x1; *x1 = *x2; *x2 = tmp; } __device__ void insertion_sort(int length, unsigned char* array) { for (unsigned i = 1; i < length; ++i) { unsigned j = i; while (j > 0 && array[j-1] > array[j]) { swap(&array[j-1], &array[j]); j--; } } } __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey) { __shared__ unsigned char shared_stuff[SHARED_STUFF_SIZE_X][SHARED_STUFF_SIZE_Y][3]; // map from blockIdx to pixel position int patch_start_x = blockIdx.x * blockDim.x; int patch_start_y = blockIdx.y * blockDim.y; int xxx = patch_start_x + threadIdx.x; int yyy = patch_start_y + threadIdx.y; size_t offset = threadIdx.x + threadIdx.y*blockDim.x; for (size_t i = offset; i < SHARED_STUFF_SIZE; i+=blockDim.x*blockDim.y) { size_t r_x_index = patch_start_x + (i % SHARED_STUFF_SIZE_X); size_t r_y_index = patch_start_y + (i / SHARED_STUFF_SIZE_X); size_t r_index = (r_x_index + r_y_index * imagesizex) * 3; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][0] = image[r_index]; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][1] = image[r_index+1]; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][2] = image[r_index+2]; } int x = threadIdx.x + KERNEL_SIZE; int y = threadIdx.y + KERNEL_SIZE; // If inside image if (xxx < imagesizex && yyy < imagesizey) { // Filter kernel (simple box filter) unsigned char sort_r[KERNEL_SIZE*KERNEL_SIZE*4]; unsigned char sort_g[KERNEL_SIZE*KERNEL_SIZE*4]; unsigned char sort_b[KERNEL_SIZE*KERNEL_SIZE*4]; int it = 0; for(int dy=-KERNEL_SIZE;dy<=KERNEL_SIZE;dy++) { for(int dx=-KERNEL_SIZE;dx<=KERNEL_SIZE;dx++) { // Use max and min to avoid branching! 
int xx = min(max(x+dx, 0), SHARED_STUFF_SIZE_X-1); int yy = min(max(y+dy, 0), SHARED_STUFF_SIZE_Y-1); sort_r[it] = shared_stuff[xx][yy][0]; sort_g[it] = shared_stuff[xx][yy][1]; sort_b[it] = shared_stuff[xx][yy][2]; it++; } } insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_r); insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_g); insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_b); out[(yyy*imagesizex+xxx)*3+0] = sort_r[KERNEL_SIZE*KERNEL_SIZE*2]; out[(yyy*imagesizex+xxx)*3+1] = sort_g[KERNEL_SIZE*KERNEL_SIZE*2]; out[(yyy*imagesizex+xxx)*3+2] = sort_b[KERNEL_SIZE*KERNEL_SIZE*2]; } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { if (KERNEL_SIZE > MAX_KERNEL_SIZE_X || KERNEL_SIZE > MAX_KERNEL_SIZE_Y) { printf("Kernel size out of bounds!\n"); return; } pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3); hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice ); hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); dim3 grid(imagesizex/BLOCK_SIZE,imagesizey/BLOCK_SIZE); int start = GetMicroseconds(); hipLaunchKernelGGL(( filter), dim3(grid),dim3(dim3(BLOCK_SIZE, BLOCK_SIZE)), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey); hipDeviceSynchronize(); int end = GetMicroseconds(); printf("Time: %i us\n", end-start); // Check for errors! hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error: %s\n", hipGetErrorString(err)); } hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost ); hipFree( dev_bitmap ); hipFree( dev_input ); } // Display images void draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); // Not wide - probably square. Original left, result right. if (imagesizey >= imagesizex) { glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { #ifdef __APPLE__ *(NULL) = 0xBAD; #endif glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) { image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); } else { image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); } if (imagesizey >= imagesizex) { glutInitWindowSize( imagesizex*2, imagesizey ); } else { glutInitWindowSize( imagesizex, imagesizey*2 ); } glutCreateWindow("Lab 5"); glutDisplayFunc(draw); ResetMilli(); computeImages(5, 5); // You can save the result to a file like this: // writeppm("out.ppm", imagesizey, imagesizex, pixels); glutMainLoop(); return 0; }
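// Sketch, with assumptions: a full (2*KERNEL_SIZE+1) x (2*KERNEL_SIZE+1) window
// contains (2*KERNEL_SIZE+1)^2 samples (25 when KERNEL_SIZE is 2), so sizing the
// scratch buffers and picking the median index from one WINDOW constant avoids
// the mismatch with the KERNEL_SIZE*KERNEL_SIZE*4 expression used above. Note
// also that a cooperative shared-memory load needs a __syncthreads() before any
// thread reads entries written by its neighbours. WINDOW_DIM, WINDOW_SIZE and
// median_of_window are names local to this sketch, not the lab's reference code.
#define WINDOW_DIM  (2 * KERNEL_SIZE + 1)
#define WINDOW_SIZE (WINDOW_DIM * WINDOW_DIM)

__device__ unsigned char median_of_window(unsigned char window[WINDOW_SIZE])
{
    insertion_sort(WINDOW_SIZE, window);   // sorts the caller's buffer in place (device sort above)
    return window[WINDOW_SIZE / 2];        // middle element of the sorted window
}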
b114ed3aabd91d5c377fc078ca9b00e283e5f59f.cu
// Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define MAX_KERNEL_SIZE_X 10 #define MAX_KERNEL_SIZE_Y 10 #define BLOCK_SIZE 16 #define SHARED_STUFF_SIZE_X (BLOCK_SIZE+MAX_KERNEL_SIZE_X * 2) #define SHARED_STUFF_SIZE_Y (BLOCK_SIZE+MAX_KERNEL_SIZE_Y * 2) #define SHARED_STUFF_SIZE (SHARED_STUFF_SIZE_X*SHARED_STUFF_SIZE_Y) #define KERNEL_SIZE 2 __device__ void swap(unsigned char* x1, unsigned char* x2) { unsigned char tmp = *x1; *x1 = *x2; *x2 = tmp; } __device__ void insertion_sort(int length, unsigned char* array) { for (unsigned i = 1; i < length; ++i) { unsigned j = i; while (j > 0 && array[j-1] > array[j]) { swap(&array[j-1], &array[j]); j--; } } } __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey) { __shared__ unsigned char shared_stuff[SHARED_STUFF_SIZE_X][SHARED_STUFF_SIZE_Y][3]; // map from blockIdx to pixel position int patch_start_x = blockIdx.x * blockDim.x; int patch_start_y = blockIdx.y * blockDim.y; int xxx = patch_start_x + threadIdx.x; int yyy = patch_start_y + threadIdx.y; size_t offset = threadIdx.x + threadIdx.y*blockDim.x; for (size_t i = offset; i < SHARED_STUFF_SIZE; i+=blockDim.x*blockDim.y) { size_t r_x_index = patch_start_x + (i % SHARED_STUFF_SIZE_X); size_t r_y_index = patch_start_y + (i / SHARED_STUFF_SIZE_X); size_t r_index = (r_x_index + r_y_index * imagesizex) * 3; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][0] = image[r_index]; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][1] = image[r_index+1]; shared_stuff[i % SHARED_STUFF_SIZE_X][i / SHARED_STUFF_SIZE_X][2] = image[r_index+2]; } int x = threadIdx.x + KERNEL_SIZE; int y = threadIdx.y + KERNEL_SIZE; // If inside image if (xxx < imagesizex && yyy < imagesizey) { // Filter kernel (simple box filter) unsigned char sort_r[KERNEL_SIZE*KERNEL_SIZE*4]; unsigned char sort_g[KERNEL_SIZE*KERNEL_SIZE*4]; unsigned char sort_b[KERNEL_SIZE*KERNEL_SIZE*4]; int it = 0; for(int dy=-KERNEL_SIZE;dy<=KERNEL_SIZE;dy++) { for(int dx=-KERNEL_SIZE;dx<=KERNEL_SIZE;dx++) { // Use max and min to avoid branching! 
int xx = min(max(x+dx, 0), SHARED_STUFF_SIZE_X-1); int yy = min(max(y+dy, 0), SHARED_STUFF_SIZE_Y-1); sort_r[it] = shared_stuff[xx][yy][0]; sort_g[it] = shared_stuff[xx][yy][1]; sort_b[it] = shared_stuff[xx][yy][2]; it++; } } insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_r); insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_g); insertion_sort(KERNEL_SIZE*KERNEL_SIZE*4, sort_b); out[(yyy*imagesizex+xxx)*3+0] = sort_r[KERNEL_SIZE*KERNEL_SIZE*2]; out[(yyy*imagesizex+xxx)*3+1] = sort_g[KERNEL_SIZE*KERNEL_SIZE*2]; out[(yyy*imagesizex+xxx)*3+2] = sort_b[KERNEL_SIZE*KERNEL_SIZE*2]; } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { if (KERNEL_SIZE > MAX_KERNEL_SIZE_X || KERNEL_SIZE > MAX_KERNEL_SIZE_Y) { printf("Kernel size out of bounds!\n"); return; } pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3); cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice ); cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); dim3 grid(imagesizex/BLOCK_SIZE,imagesizey/BLOCK_SIZE); int start = GetMicroseconds(); filter<<<grid,dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(dev_input, dev_bitmap, imagesizex, imagesizey); cudaThreadSynchronize(); int end = GetMicroseconds(); printf("Time: %i us\n", end-start); // Check for errors! cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(err)); } cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost ); cudaFree( dev_bitmap ); cudaFree( dev_input ); } // Display images void draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); // Not wide - probably square. Original left, result right. if (imagesizey >= imagesizex) { glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { #ifdef __APPLE__ *(NULL) = 0xBAD; #endif glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) { image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); } else { image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); } if (imagesizey >= imagesizex) { glutInitWindowSize( imagesizex*2, imagesizey ); } else { glutInitWindowSize( imagesizex, imagesizey*2 ); } glutCreateWindow("Lab 5"); glutDisplayFunc(draw); ResetMilli(); computeImages(5, 5); // You can save the result to a file like this: // writeppm("out.ppm", imagesizey, imagesizex, pixels); glutMainLoop(); return 0; }
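// Sketch, assuming images whose sides are not multiples of BLOCK_SIZE should
// also be covered: the grid above is computed with truncating division, so a
// rounded-up grid pairs with the kernel's own output bounds check
// (xxx < imagesizex && yyy < imagesizey). div_up is a local helper name.
static inline unsigned int div_up(unsigned int a, unsigned int b)
{
    return (a + b - 1) / b;   // ceiling division
}

// e.g. in computeImages():
//   dim3 grid(div_up(imagesizex, BLOCK_SIZE), div_up(imagesizey, BLOCK_SIZE));
//   filter<<<grid, dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(dev_input, dev_bitmap,
//                                                  imagesizex, imagesizey);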
92c9d9ef05ab5449399d822150844910500feb96.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "hip/hip_runtime.h" #define OPT2 #ifdef OPT1 __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){ int power_two = __powf(2, i); if (ty % power_two == 0) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; __syncthreads(); } input_hidden_cuda[index] = weight_matrix[ty][tx]; if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } #endif #ifdef OPT2 __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } __syncthreads(); } input_hidden_cuda[index] = weight_matrix[ty][tx]; if ( tx == 0 ) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } #endif __global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
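// Sketch, assuming the OPT2 variant is meant to produce the same column sum as
// OPT1 (a power-of-two tree reduction over HEIGHT rows): the bitmask test
// (ty & (i - 1)) == 0 is the branch-friendly equivalent of ty % i == 0, and the
// partner row for stride i sits i/2 rows below. Standalone illustration only;
// reduce_rows is not part of backprop.h.
__device__ void reduce_rows(float weight_matrix[HEIGHT][WIDTH], int ty, int tx)
{
    for (unsigned int i = 2; i <= HEIGHT; i *= 2) {
        if ((ty & (i - 1)) == 0) {
            // accumulate the partner element half a stride below
            weight_matrix[ty][tx] += weight_matrix[ty + i / 2][tx];
        }
        __syncthreads();   // every round must finish before the next stride reads
    }
}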
92c9d9ef05ab5449399d822150844910500feb96.cu
#ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "cuda.h" #define OPT2 #ifdef OPT1 __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){ int power_two = __powf(2, i); if (ty % power_two == 0) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; __syncthreads(); } input_hidden_cuda[index] = weight_matrix[ty][tx]; if (tx == 0) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } #endif #ifdef OPT2 __global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; weight_matrix[ty][tx] = input_hidden_cuda[index] * input_cuda[index_in]; __syncthreads(); for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } __syncthreads(); } input_hidden_cuda[index] = weight_matrix[ty][tx]; if ( tx == 0 ) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } #endif __global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
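// Sketch: a scalar, host-side restatement of the update applied per weight in
// bpnn_adjust_weights_cuda above (ETA and MOMENTUM come from backprop.h).
// The helper name adjust_weight_ref is local to this illustration.
static inline void adjust_weight_ref(float *w, float *oldw, float delta, float ly)
{
    float step = ETA * delta * ly + MOMENTUM * (*oldw);
    *w    += step;   // momentum-smoothed gradient step
    *oldw  = step;   // stored so the next call's momentum term sees this step
}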
c05324ffbd6b1c1d947410564fc6888cdfe47d37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * simulate a cellular automaton with periodic boundaries (torus-like) * serial version * * (c) 2016 Felix Kubicek (Cuda port, optimization, e.g.: state field as bitmap) * (c) 2016 Steffen Christgau (C99 port, modularization) * (c) 1996,1997 Peter Sanders, Ingo Boesnach (original source) * * command line arguments: * #1: Number of lines * #2: Number of iterations to be simulated * */ #include <stdio.h> #include <stdlib.h> #include "ca_common.h" #define ANNEAL(i) ((0x3D0 >> (i)) & 1) /* --------------------- CA simulation -------------------------------- */ /* annealing rule from ChoDro96 page 34 * the table is used to map the number of nonzero * states in the neighborhood to the new state * __constant__ static const cell_state_t anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1}; */ /* make one simulation iteration with lines lines. * old configuration is in from, new one is written to to. */ __global__ static void simulate(line_t_bit *from, line_t_bit *to, int lines) { int gid = threadIdx.x + blockIdx.x * blockDim.x; int grid_size = blockDim.x * gridDim.x; int field_width = XSIZE/8; int field_size = field_width * lines; for (int i = gid; i < field_size; i+= grid_size) { int x = i % field_width; int y = i / field_width; int x_prev = ((x - 1) + field_width) % field_width; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % field_width; int y_next = (y + 1) % lines; cell_state_t top_l = from[y_prev][x_prev] >> 7; cell_state_t top_r = from[y_prev][x_next] & 1; int top = (top_r << 9) + (from[y_prev][x] << 1) + top_l; cell_state_t middle_l = from[y][x_prev] >> 7; cell_state_t middle_r = from[y][x_next] & 1; int middle = (middle_r << 9) + (from[y][x] << 1) + middle_l; cell_state_t bottom_l = from[y_next][x_prev] >> 7; cell_state_t bottom_r = from[y_next][x_next] & 1; int bottom = (bottom_r << 9) + (from[y_next][x] << 1) + bottom_l; cell_state_t result = 0; // 0x111 mask used to get all bits of top/middle/bottom row (3 bits each) int mask = 7; for(int b = 0; b < 8; b++) { int num_bits = ((top & mask) << 6) + ((middle & mask) << 3) + (bottom & mask); // bit counting algorithm (see: https://en.wikipedia.org/wiki/Hamming_weight) num_bits = (num_bits & 0x5555) + ((num_bits >> 1) & 0x5555); num_bits = (num_bits & 0x3333) + ((num_bits >> 2) & 0x3333); num_bits = (num_bits & 0x0f0f) + ((num_bits >> 4) & 0x0f0f); num_bits = (num_bits & 0x00ff) + ((num_bits >> 8) & 0x00ff); result |= ANNEAL(num_bits) << b; mask = mask << 1; } to[y][x] = result; } } /* --------------------- measurement ---------------------------------- */ int main(int argc, char** argv) { int lines, its; ca_init(argc, argv, &lines, &its); line_t_bit *from, *to, *from_d, *to_d; line_t *verify_field; MALLOC_ERROR_CHECK(from = (line_t_bit *) calloc(lines, sizeof(line_t_bit))); MALLOC_ERROR_CHECK(to = (line_t_bit *) calloc(lines, sizeof(line_t_bit))); MALLOC_ERROR_CHECK(verify_field = (line_t *) calloc((lines + 2), sizeof(line_t))); CUDA_ERROR_CHECK(hipMalloc((void **) &from_d, lines * sizeof(line_t_bit))); CUDA_ERROR_CHECK(hipMalloc((void **) &to_d, lines * sizeof(line_t_bit))); CUDA_ERROR_CHECK(hipMalloc((void **) &to_d, lines * sizeof(line_t_bit))); ca_init_config_bit(from, lines, 0); CUDA_ERROR_CHECK(hipMemcpy((void *) from_d, (void *) from, lines * sizeof(line_t_bit), hipMemcpyHostToDevice)); CUDA_ERROR_CHECK(hipMemcpy((void *) to_d, (void *) to, lines * sizeof(line_t_bit), hipMemcpyHostToDevice)); TIME_GET(sim_start); for (int i = 0; i < its; 
i++) { hipLaunchKernelGGL(( simulate) , dim3(lines/10), dim3(XSIZE/8), 0, 0, from_d, to_d, lines); line_t_bit *temp = from_d; from_d = to_d; to_d = temp; } hipDeviceSynchronize(); TIME_GET(sim_stop); CUDA_ERROR_CHECK(hipPeekAtLastError()); CUDA_ERROR_CHECK(hipMemcpy((void *) from, (void *) from_d, lines * sizeof(line_t_bit), hipMemcpyDeviceToHost)); for (int y = 0; y < lines; y++) { for (int x = 0; x < XSIZE; x++) { verify_field[y+1][x+1] = get_bit(from, x, y); } } ca_hash_and_report(verify_field + 1, lines, TIME_DIFF(sim_start, sim_stop)); free(from); free(to); free(verify_field); CUDA_ERROR_CHECK(hipFree(from_d)); CUDA_ERROR_CHECK(hipFree(to_d)); return EXIT_SUCCESS; }
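// Sketch, an alternative not used above: the four shift/mask steps implement a
// 16-bit population count by hand; CUDA's __popc intrinsic returns the same bit
// count for the packed 3x3 neighbourhood in a single instruction. The helper
// name is local to this illustration.
__device__ static inline int neighbourhood_popcount(int top, int middle,
                                                    int bottom, int mask)
{
    int packed = ((top & mask) << 6) | ((middle & mask) << 3) | (bottom & mask);
    return __popc((unsigned int)packed);   // number of set bits = live cells in the window
}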
c05324ffbd6b1c1d947410564fc6888cdfe47d37.cu
/* * simulate a cellular automaton with periodic boundaries (torus-like) * serial version * * (c) 2016 Felix Kubicek (Cuda port, optimization, e.g.: state field as bitmap) * (c) 2016 Steffen Christgau (C99 port, modularization) * (c) 1996,1997 Peter Sanders, Ingo Boesnach (original source) * * command line arguments: * #1: Number of lines * #2: Number of iterations to be simulated * */ #include <stdio.h> #include <stdlib.h> #include "ca_common.h" #define ANNEAL(i) ((0x3D0 >> (i)) & 1) /* --------------------- CA simulation -------------------------------- */ /* annealing rule from ChoDro96 page 34 * the table is used to map the number of nonzero * states in the neighborhood to the new state * __constant__ static const cell_state_t anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1}; */ /* make one simulation iteration with lines lines. * old configuration is in from, new one is written to to. */ __global__ static void simulate(line_t_bit *from, line_t_bit *to, int lines) { int gid = threadIdx.x + blockIdx.x * blockDim.x; int grid_size = blockDim.x * gridDim.x; int field_width = XSIZE/8; int field_size = field_width * lines; for (int i = gid; i < field_size; i+= grid_size) { int x = i % field_width; int y = i / field_width; int x_prev = ((x - 1) + field_width) % field_width; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % field_width; int y_next = (y + 1) % lines; cell_state_t top_l = from[y_prev][x_prev] >> 7; cell_state_t top_r = from[y_prev][x_next] & 1; int top = (top_r << 9) + (from[y_prev][x] << 1) + top_l; cell_state_t middle_l = from[y][x_prev] >> 7; cell_state_t middle_r = from[y][x_next] & 1; int middle = (middle_r << 9) + (from[y][x] << 1) + middle_l; cell_state_t bottom_l = from[y_next][x_prev] >> 7; cell_state_t bottom_r = from[y_next][x_next] & 1; int bottom = (bottom_r << 9) + (from[y_next][x] << 1) + bottom_l; cell_state_t result = 0; // 0x111 mask used to get all bits of top/middle/bottom row (3 bits each) int mask = 7; for(int b = 0; b < 8; b++) { int num_bits = ((top & mask) << 6) + ((middle & mask) << 3) + (bottom & mask); // bit counting algorithm (see: https://en.wikipedia.org/wiki/Hamming_weight) num_bits = (num_bits & 0x5555) + ((num_bits >> 1) & 0x5555); num_bits = (num_bits & 0x3333) + ((num_bits >> 2) & 0x3333); num_bits = (num_bits & 0x0f0f) + ((num_bits >> 4) & 0x0f0f); num_bits = (num_bits & 0x00ff) + ((num_bits >> 8) & 0x00ff); result |= ANNEAL(num_bits) << b; mask = mask << 1; } to[y][x] = result; } } /* --------------------- measurement ---------------------------------- */ int main(int argc, char** argv) { int lines, its; ca_init(argc, argv, &lines, &its); line_t_bit *from, *to, *from_d, *to_d; line_t *verify_field; MALLOC_ERROR_CHECK(from = (line_t_bit *) calloc(lines, sizeof(line_t_bit))); MALLOC_ERROR_CHECK(to = (line_t_bit *) calloc(lines, sizeof(line_t_bit))); MALLOC_ERROR_CHECK(verify_field = (line_t *) calloc((lines + 2), sizeof(line_t))); CUDA_ERROR_CHECK(cudaMalloc((void **) &from_d, lines * sizeof(line_t_bit))); CUDA_ERROR_CHECK(cudaMalloc((void **) &to_d, lines * sizeof(line_t_bit))); CUDA_ERROR_CHECK(cudaMalloc((void **) &to_d, lines * sizeof(line_t_bit))); ca_init_config_bit(from, lines, 0); CUDA_ERROR_CHECK(cudaMemcpy((void *) from_d, (void *) from, lines * sizeof(line_t_bit), cudaMemcpyHostToDevice)); CUDA_ERROR_CHECK(cudaMemcpy((void *) to_d, (void *) to, lines * sizeof(line_t_bit), cudaMemcpyHostToDevice)); TIME_GET(sim_start); for (int i = 0; i < its; i++) { simulate <<<lines/10, XSIZE/8>>> (from_d, to_d, lines); line_t_bit *temp = 
from_d; from_d = to_d; to_d = temp; } cudaDeviceSynchronize(); TIME_GET(sim_stop); CUDA_ERROR_CHECK(cudaPeekAtLastError()); CUDA_ERROR_CHECK(cudaMemcpy((void *) from, (void *) from_d, lines * sizeof(line_t_bit), cudaMemcpyDeviceToHost)); for (int y = 0; y < lines; y++) { for (int x = 0; x < XSIZE; x++) { verify_field[y+1][x+1] = get_bit(from, x, y); } } ca_hash_and_report(verify_field + 1, lines, TIME_DIFF(sim_start, sim_stop)); free(from); free(to); free(verify_field); CUDA_ERROR_CHECK(cudaFree(from_d)); CUDA_ERROR_CHECK(cudaFree(to_d)); return EXIT_SUCCESS; }
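// Sketch, illustrative only: simulate() walks the field with a grid-stride
// loop, so the launch geometry affects occupancy rather than correctness and
// need not assume that `lines` is divisible by 10. The 256-thread block size
// below is an arbitrary assumption.
static inline dim3 ca_grid(int lines, int threads_per_block)
{
    int field_size = (XSIZE / 8) * lines;   // one byte holds 8 cells of a row
    int blocks = (field_size + threads_per_block - 1) / threads_per_block;
    return dim3(blocks);
}

// e.g.:  simulate<<<ca_grid(lines, 256), 256>>>(from_d, to_d, lines);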
2c8c05ea81a517d6b8c2e27d5badf65379f004c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "region_output.h" #include "thrust/functional.h" #include "thrust/sort.h" #include "boost/iterator/counting_iterator.hpp" namespace apollo { namespace perception { __host__ __device__ float bbox_size_gpu(const float *bbox, const bool normalized) { if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. return float(0.); } else { const float width = bbox[2] - bbox[0]; const float height = bbox[3] - bbox[1]; if (normalized) { return width * height; } else { // If bbox is not within range [0, 1]. return (width + 1) * (height + 1); } } } __host__ __device__ float jaccard_overlap_gpu(const float *bbox1, const float *bbox2) { if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { return float(0.); } else { const float inter_xmin = max(bbox1[0], bbox2[0]); const float inter_ymin = max(bbox1[1], bbox2[1]); const float inter_xmax = min(bbox1[2], bbox2[2]); const float inter_ymax = min(bbox1[3], bbox2[3]); const float inter_width = inter_xmax - inter_xmin; const float inter_height = inter_ymax - inter_ymin; const float inter_size = inter_width * inter_height; const float bbox1_size = bbox_size_gpu(bbox1, true); const float bbox2_size = bbox_size_gpu(bbox2, true); return inter_size / (bbox1_size + bbox2_size - inter_size); } } __global__ void compute_overlapped_by_idx_kernel(const int nthreads, const float *bbox_data, const float overlap_threshold, const int *idx, const int num_idx, bool *overlapped_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { const int j = index % num_idx; const int i = (index / num_idx); if (i == j) { // Ignore same bbox. return; } // Compute overlap between i-th bbox and j-th bbox. 
const int start_loc_i = idx[i] * s_box_block_size; const int start_loc_j = idx[j] * s_box_block_size; const float overlap = jaccard_overlap_gpu(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } else { //const float *b1 = bbox_data + start_loc_i; //const float *b2 = bbox_data + start_loc_j; overlapped_data[index] = false; } } } void compute_overlapped_by_idx_gpu(const int nthreads, const float *bbox_data, const float overlap_threshold, const int *idx, const int num_idx, bool *overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) const int block_size = 512; int grid_size = (nthreads + block_size - 1) / block_size; compute_overlapped_by_idx_kernel << < grid_size, block_size >> > (nthreads, bbox_data, overlap_threshold, idx, num_idx, overlapped_data); } void apply_nms_gpu(const float *bbox_data, const float *conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, std::vector<int> *indices, std::shared_ptr <caffe::SyncedMemory> overlapped, std::shared_ptr <caffe::SyncedMemory> idx_sm) { // Keep part of detections whose scores are higher than confidence threshold. hipDeviceSynchronize(); std::vector<int> idx; std::vector<float> confidences; for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } int num_remain = confidences.size(); if (num_remain == 0) { return; } // Sort detections based on score. thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], thrust::greater<float>()); if (top_k > -1 && top_k < num_remain) { num_remain = top_k; } int *idx_data = (int *) idx_sm->mutable_cpu_data(); std::copy(idx.begin(), idx.begin() + num_remain, idx_data); bool *overlapped_data = (bool *) overlapped->mutable_gpu_data(); int total_bboxes = num_remain * num_remain; compute_overlapped_by_idx_gpu(total_bboxes, bbox_data, nms_threshold, (const int *) idx_sm->gpu_data(), num_remain, overlapped_data); hipDeviceSynchronize(); // Do non-maximum suppression based on overlapped results. const bool *overlapped_results = (const bool *) overlapped->cpu_data(); std::vector<int> selected_indices; apply_nms(overlapped_results, num_remain, &selected_indices); // Put back the selected information. for (int i = 0; i < (int)selected_indices.size(); ++i) { indices->push_back(idx[selected_indices[i]]); } } void apply_nms(const bool *overlapped, const int num, std::vector<int> *indices) { std::vector<int> index_vec(boost::counting_iterator<int>(0), boost::counting_iterator<int>(num)); // Do nms. indices->clear(); while (index_vec.size() != 0) { // Get the current highest score box. int best_idx = index_vec.front(); indices->push_back(best_idx); // Erase the best box. index_vec.erase(index_vec.begin()); for (std::vector<int>::iterator it = index_vec.begin(); it != index_vec.end();) { int cur_idx = *it; // Remove it if necessary if (overlapped[best_idx * num + cur_idx]) { it = index_vec.erase(it); } else { ++it; } } } } } }
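// Sketch: a host-side reference for the normalized-coordinate IoU that
// jaccard_overlap_gpu computes above, convenient for spot-checking the GPU NMS
// on a handful of boxes. Boxes are [xmin, ymin, xmax, ymax]; iou_ref is a
// local name, not part of region_output.h.
#include <algorithm>

static float iou_ref(const float *b1, const float *b2)
{
    const float ixmin = std::max(b1[0], b2[0]);
    const float iymin = std::max(b1[1], b2[1]);
    const float ixmax = std::min(b1[2], b2[2]);
    const float iymax = std::min(b1[3], b2[3]);
    if (ixmax <= ixmin || iymax <= iymin) {
        return 0.0f;                                // disjoint or merely touching
    }
    const float inter = (ixmax - ixmin) * (iymax - iymin);
    const float a1 = (b1[2] - b1[0]) * (b1[3] - b1[1]);
    const float a2 = (b2[2] - b2[0]) * (b2[3] - b2[1]);
    return inter / (a1 + a2 - inter);               // intersection over union
}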
2c8c05ea81a517d6b8c2e27d5badf65379f004c5.cu
/****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "region_output.h" #include "thrust/functional.h" #include "thrust/sort.h" #include "boost/iterator/counting_iterator.hpp" namespace apollo { namespace perception { __host__ __device__ float bbox_size_gpu(const float *bbox, const bool normalized) { if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. return float(0.); } else { const float width = bbox[2] - bbox[0]; const float height = bbox[3] - bbox[1]; if (normalized) { return width * height; } else { // If bbox is not within range [0, 1]. return (width + 1) * (height + 1); } } } __host__ __device__ float jaccard_overlap_gpu(const float *bbox1, const float *bbox2) { if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { return float(0.); } else { const float inter_xmin = max(bbox1[0], bbox2[0]); const float inter_ymin = max(bbox1[1], bbox2[1]); const float inter_xmax = min(bbox1[2], bbox2[2]); const float inter_ymax = min(bbox1[3], bbox2[3]); const float inter_width = inter_xmax - inter_xmin; const float inter_height = inter_ymax - inter_ymin; const float inter_size = inter_width * inter_height; const float bbox1_size = bbox_size_gpu(bbox1, true); const float bbox2_size = bbox_size_gpu(bbox2, true); return inter_size / (bbox1_size + bbox2_size - inter_size); } } __global__ void compute_overlapped_by_idx_kernel(const int nthreads, const float *bbox_data, const float overlap_threshold, const int *idx, const int num_idx, bool *overlapped_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { const int j = index % num_idx; const int i = (index / num_idx); if (i == j) { // Ignore same bbox. return; } // Compute overlap between i-th bbox and j-th bbox. 
const int start_loc_i = idx[i] * s_box_block_size; const int start_loc_j = idx[j] * s_box_block_size; const float overlap = jaccard_overlap_gpu(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } else { //const float *b1 = bbox_data + start_loc_i; //const float *b2 = bbox_data + start_loc_j; overlapped_data[index] = false; } } } void compute_overlapped_by_idx_gpu(const int nthreads, const float *bbox_data, const float overlap_threshold, const int *idx, const int num_idx, bool *overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) const int block_size = 512; int grid_size = (nthreads + block_size - 1) / block_size; compute_overlapped_by_idx_kernel << < grid_size, block_size >> > (nthreads, bbox_data, overlap_threshold, idx, num_idx, overlapped_data); } void apply_nms_gpu(const float *bbox_data, const float *conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, std::vector<int> *indices, std::shared_ptr <caffe::SyncedMemory> overlapped, std::shared_ptr <caffe::SyncedMemory> idx_sm) { // Keep part of detections whose scores are higher than confidence threshold. cudaDeviceSynchronize(); std::vector<int> idx; std::vector<float> confidences; for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } int num_remain = confidences.size(); if (num_remain == 0) { return; } // Sort detections based on score. thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], thrust::greater<float>()); if (top_k > -1 && top_k < num_remain) { num_remain = top_k; } int *idx_data = (int *) idx_sm->mutable_cpu_data(); std::copy(idx.begin(), idx.begin() + num_remain, idx_data); bool *overlapped_data = (bool *) overlapped->mutable_gpu_data(); int total_bboxes = num_remain * num_remain; compute_overlapped_by_idx_gpu(total_bboxes, bbox_data, nms_threshold, (const int *) idx_sm->gpu_data(), num_remain, overlapped_data); cudaDeviceSynchronize(); // Do non-maximum suppression based on overlapped results. const bool *overlapped_results = (const bool *) overlapped->cpu_data(); std::vector<int> selected_indices; apply_nms(overlapped_results, num_remain, &selected_indices); // Put back the selected information. for (int i = 0; i < (int)selected_indices.size(); ++i) { indices->push_back(idx[selected_indices[i]]); } } void apply_nms(const bool *overlapped, const int num, std::vector<int> *indices) { std::vector<int> index_vec(boost::counting_iterator<int>(0), boost::counting_iterator<int>(num)); // Do nms. indices->clear(); while (index_vec.size() != 0) { // Get the current highest score box. int best_idx = index_vec.front(); indices->push_back(best_idx); // Erase the best box. index_vec.erase(index_vec.begin()); for (std::vector<int>::iterator it = index_vec.begin(); it != index_vec.end();) { int cur_idx = *it; // Remove it if necessary if (overlapped[best_idx * num + cur_idx]) { it = index_vec.erase(it); } else { ++it; } } } } } }
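// Sketch: the confidence filtering and top-k truncation that apply_nms_gpu
// performs on the host before the GPU overlap pass, restated with the standard
// library. Candidate and top_k_indices are names local to this illustration,
// not part of the Apollo API.
#include <algorithm>
#include <vector>

struct Candidate { int idx; float conf; };

static std::vector<int> top_k_indices(const float *conf_data, int num_bboxes,
                                      float confidence_threshold, int top_k)
{
    std::vector<Candidate> kept;
    for (int i = 0; i < num_bboxes; ++i) {
        if (conf_data[i] > confidence_threshold) {
            kept.push_back({i, conf_data[i]});      // keep index and score together
        }
    }
    std::sort(kept.begin(), kept.end(),
              [](const Candidate &a, const Candidate &b) { return a.conf > b.conf; });
    if (top_k > -1 && top_k < (int)kept.size()) {
        kept.resize(top_k);                          // keep only the best top_k detections
    }
    std::vector<int> indices;
    for (const Candidate &c : kept) {
        indices.push_back(c.idx);
    }
    return indices;
}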
9ec93453c077d0514885c804327e4ca910e6972c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKDeltaGpuData() { hipError_t status; status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "hipMemcpyToSymbol: SetKDeltaGpuData copy to cData failed"); } void GetKDeltaGpuData() { hipError_t status; status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "hipMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhOutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearOutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUOutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUOutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = pData[0]; pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, 
unsigned char* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = (NNFloat)pData[0] * (NNFloat)(1.0 / 256.0); pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = (NNFloat)pData[0] * (NNFloat)(1.0 / 128.0); pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock)); hipLaunchKernelGGL(( kCalculateHingeOutputDelta_kernel), dim3(batch), dim3(threads), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateHingeOutputDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = a - (NNFloat)1.0; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope); 
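/* Every case in this switch follows the same two-pass pattern: when bSparseIgnoreZero is false, a "Raw" kernel first writes the zero-target delta for all batch * stride outputs, and the matching "NonZero" kernel then overwrites only the positions listed in pSparseIndex[pSparseStart[dpos] .. pSparseEnd[dpos]) with the delta for the sparse target (an implicit 1.0, or 1/k for SoftMax where k is the number of non-zero outputs in the example). When bSparseIgnoreZero is true, the hipMemset above zeroes pDelta instead, so only the sparse targets contribute a gradient. */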
LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * (t - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel), dim3(grid2), 
dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: hipLaunchKernelGGL(( kCalculateSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = cData._deltaBoost_zero * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() 
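/* For a sigmoid or softmax output unit with activation a and target t, the derivative of the cross-entropy loss with respect to the pre-activation reduces to (a - t): the activation derivative cancels against the derivative of the loss. The analog (valued) sparse kernel below therefore just scales that difference by cData._deltaBoost_one for the non-zero targets; zero targets are handled by the Raw kernel or the memset in the caller. */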
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = cData._deltaBoost_one * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); break; case SoftMax: hipLaunchKernelGGL(( kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - t); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) { output = cData._SMCE_zeroScale * a; } pDelta[pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda); 
LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn((a > (NNFloat)1.0)) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, 
stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope); LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; // Calculate sum of activations if (pos < stride) { NNFloat pi = (NNFloat)0.0; for (int i = 0; i < batch; i++) { pi += pUnit[pos]; pos += stride; } // Calculate sparseness penalty pi /= (NNFloat)batch; pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi)); NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi)); // Apply sparseness penalty to deltas pos = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < batch; i++) { pDelta[pos] += penalty; pos += stride; } } } // Calculates and applies sparseness penalty to hidden layers void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { dim3 grid1(CalculateBlocks(stride)); hipLaunchKernelGGL(( kCalculateSparsenessPenalty_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, batch, stride, pUnit, pDelta, p, beta); LAUNCHERROR("kCalculateSparsenessPenalty_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSigmoidHadamardProduct_kernel(uint64_t size, 
NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; pDelta[pos] = x * ((NNFloat)1.0 - x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; x *= oneOverScale; pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] = (NNFloat)0.0; } } __global__ void LAUNCH_BOUNDS() kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) { pDelta[pos] *= slope; } } } __global__ void LAUNCH_BOUNDS() kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] *= (x + alpha); } } __global__ void LAUNCH_BOUNDS() kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat delta = pDelta[pos]; if (x > (NNFloat)0.0) { delta *= lambda; } else { delta *= (x + lambda * alpha); } pDelta[pos] = delta; } } void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint32_t blocks = CalculateBlocks(size); NNFloat oneOverScale = (NNFloat)1.0 / scale; switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, scale, oneOverScale); LAUNCHERROR("kCalculateTanhHadamardProduct_kernel"); break; case Linear: // Derivative of linear output is 1, no need to call any kernel here break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateRELUHadamardProduct_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateELUHadamardProduct_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSELUHadamardProduct_kernel"); break; } } __global__ void LAUNCH_BOUNDS() 
kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Normalalize vector if too large if (r2 > norm * norm) { norm *= rsqrt(r2); pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kNormalizeDeltas_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta); LAUNCHERROR("kNormalizeDeltas_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Output result if (tgx == 0) pMagnitude[dpos] = r2; } } void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kCalculateDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kCalculateDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Normalalize vector if too large NNFloat r2 = pMagnitude[dpos]; if (r2 > norm * norm) { norm *= rsqrt(r2); uint32_t pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kNormalizeDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat s = pSrc[pos]; NNFloat sdelta = pSrcDelta[pos]; NNFloat d = pDst[pos]; NNFloat delta = (s == d) ? 
sdelta : (NNFloat)0; if (beta == (NNFloat)0) pDstDelta[pos] = delta; else if (delta != (NNFloat)0.0) pDstDelta[pos] = beta * pDstDelta[pos] + delta; } } void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { unsigned long blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateMaxoutDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pSrc, pSrcDelta, size, beta, pDst, pDstDelta); LAUNCHERROR("kCalculateMaxoutDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDP += blockIdx.x * stride; pA += blockIdx.x * stride; pB += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dp = *pDP; NNFloat dpDelta = *pDPDelta; NNFloat a = *pA; NNFloat b = *pB; NNFloat ab = a * b; NNFloat a2 = a * a; NNFloat b2 = b * b; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2)); NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2)); if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateCosineDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateCosineDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dpDelta = *pDPDelta; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * bi; NNFloat delta = dpDelta * ai; if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, 
NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateDotProductDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateDotProductDelta_kernel"); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. #define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
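// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the original kDelta
// source above. Every L1 output-delta kernel in this file computes
// sgn(a - t) scaled by the derivative of its output activation (the Sigmoid
// variant, for example, writes sgn(a - t) * a * (1 - a)). The small host-side
// C++ reference below mirrors that per-element math so kernel results can be
// spot-checked on the CPU; the names cpuSgn, cpuSigmoidL1Delta, and main are
// hypothetical and exist only for this example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

// Host-side sign function matching the sgn() convention used by the kernels.
static float cpuSgn(float x) { return (float)((x > 0.0f) - (x < 0.0f)); }

// CPU reference of the per-element math in kCalculateSigmoidL1OutputDelta_kernel:
// delta = sgn(a - t) * a * (1 - a), where a is the unit activation and t the target.
static void cpuSigmoidL1Delta(const std::vector<float>& a, const std::vector<float>& t, std::vector<float>& delta)
{
    for (size_t i = 0; i < a.size(); i++)
        delta[i] = cpuSgn(a[i] - t[i]) * a[i] * (1.0f - a[i]);
}

int main()
{
    std::vector<float> a = {0.9f, 0.2f};   // unit activations
    std::vector<float> t = {1.0f, 0.0f};   // targets
    std::vector<float> d(a.size());
    cpuSigmoidL1Delta(a, t, d);
    printf("%f %f\n", d[0], d[1]);         // expected: -0.090000 0.160000
    return 0;
}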
9ec93453c077d0514885c804327e4ca910e6972c.cu
/* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKDeltaGpuData() { cudaError_t status; status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "cudaMemcpyToSymbol: SetKDeltaGpuData copy to cData failed"); } void GetKDeltaGpuData() { cudaError_t status; status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "cudaMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel"); break; case Tanh: kCalculateTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhOutputDelta_kernel"); break; case Linear: kCalculateLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearOutputDelta_kernel"); break; case RectifiedLinear: kCalculateRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUOutputDelta_kernel"); break; case ExponentialLinear: kCalculateELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUOutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateSELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUOutputDelta_kernel"); break; case SoftMax: kCalculateSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = pData[0]; pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = 
pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = (NNFloat)pData[0] * (NNFloat)(1.0 / 256.0); pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { __shared__ NNFloat sDelta0; sDelta0 = (NNFloat)0; __syncthreads(); // Increment pointers and fetch margin and positive example uint32_t pos = threadIdx.x + 1; pUnit += blockIdx.x * stride; pDelta += blockIdx.x * stride; NNFloat positiveDP = pUnit[0]; NNFloat* pDelta0 = pDelta; pUnit += pos; pData += blockIdx.x * stride; NNFloat margin = (NNFloat)pData[0] * (NNFloat)(1.0 / 128.0); pData += pos; // Calculate loss while (pos < stride) { NNFloat negativeDP = *pUnit; NNFloat loss = max((NNFloat)0.0, margin - positiveDP + negativeDP); NNFloat delta = (NNFloat)0.0; if (loss > (NNFloat)0.0) { delta = (NNFloat)1.0; atomicAdd(&sDelta0, (NNFloat)1.0); } *pDelta = delta; pos += blockDim.x; pUnit += blockDim.x; pData += blockDim.x; } // Output delta0 __syncthreads(); if (threadIdx.x == 0) *pDelta0 = sDelta0; } template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock)); kCalculateHingeOutputDelta_kernel<<<batch, threads>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateHingeOutputDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = a - (NNFloat)1.0; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } kCalculateSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } kCalculateSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } kCalculateSparseNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } kCalculateSparseNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope); LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateSparseNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, 
pSparseEnd, pSparseIndex, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateSparseNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; /* error (a - t) scaled by the sigmoid derivative a * (1 - a) */ pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = a - t; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, 
stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: kCalculateSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = cData._deltaBoost_zero * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = cData._deltaBoost_one * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - t); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) { output = cData._SMCE_zeroScale * a; } pDelta[pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel"); break; case Tanh: kCalculateTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel"); break; case Linear: kCalculateLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel"); break; case RectifiedLinear: kCalculateRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: kCalculateELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateSELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; 
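// Editorial comment: in the "raw" sparse kernels the implicit target is 0, so the L1
// derivative is sgn(a - 0) = sgn(a), multiplied by the sigmoid derivative a * (1 - a)
// in the next statement; the matching NonZero kernels below use the explicit target 1
// and therefore compute sgn(a - 1) instead.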
pDelta[pos] = sgn(a) * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn((a > (NNFloat)1.0)) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; pDelta[pos] = sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } kCalculateSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } kCalculateSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex); LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, slope); LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroELUL1OutputDelta_kernel<<<grid2, 
getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroSELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; // Calculate sum of activations if (pos < stride) { NNFloat pi = (NNFloat)0.0; for (int i = 0; i < batch; i++) { pi += pUnit[pos]; pos += stride; } // Calculate sparseness penalty pi /= (NNFloat)batch; pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi)); NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi)); // Apply sparseness penalty to deltas pos = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < batch; i++) { pDelta[pos] += penalty; pos += stride; } } } // Calculates and applies sparseness penalty to hidden layers void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { dim3 grid1(CalculateBlocks(stride)); kCalculateSparsenessPenalty_kernel<<<grid1, getGpu()._threadsPerBlock>>>(batch, stride, pUnit, pDelta, p, beta); LAUNCHERROR("kCalculateSparsenessPenalty_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; pDelta[pos] = x * ((NNFloat)1.0 - x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; x *= oneOverScale; pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] = (NNFloat)0.0; } } __global__ void LAUNCH_BOUNDS() kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) { pDelta[pos] *= slope; } } } __global__ void LAUNCH_BOUNDS() kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] *= (x + alpha); } } __global__ void LAUNCH_BOUNDS() kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat delta = pDelta[pos]; if (x > (NNFloat)0.0) 
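// Editorial comment: SELU backward pass. For x > 0 the derivative is lambda; for x <= 0
// the forward value is x = lambda * alpha * (exp(u) - 1), so the derivative
// lambda * alpha * exp(u) equals x + lambda * alpha, which is exactly the factor
// applied in the else-branch below.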
{ delta *= lambda; } else { delta *= (x + lambda * alpha); } pDelta[pos] = delta; } } void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint32_t blocks = CalculateBlocks(size); NNFloat oneOverScale = (NNFloat)1.0 / scale; switch (activation) { case Sigmoid: kCalculateSigmoidHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel"); break; case Tanh: kCalculateTanhHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, scale, oneOverScale); LAUNCHERROR("kCalculateTanhHadamardProduct_kernel"); break; case Linear: // Derivative of linear output is 1, no need to call any kernel here break; case RectifiedLinear: kCalculateRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateRELUHadamardProduct_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel"); break; case ExponentialLinear: kCalculateELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateELUHadamardProduct_kernel"); break; case ScaledExponentialLinear: kCalculateSELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSELUHadamardProduct_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Normalalize vector if too large if (r2 > norm * norm) { norm *= rsqrt(r2); pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t blocks = (batch + 3) / 4; kNormalizeDeltas_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta); LAUNCHERROR("kNormalizeDeltas_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Output result if (tgx == 0) pMagnitude[dpos] = r2; } } void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; kCalculateDeltaMagnitudes_kernel<<<blocks, 128>>>(batch, stride, pDelta, pMagnitude); LAUNCHERROR("kCalculateDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & 
cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Normalalize vector if too large NNFloat r2 = pMagnitude[dpos]; if (r2 > norm * norm) { norm *= rsqrt(r2); uint32_t pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; kNormalizeDeltaMagnitudes_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat s = pSrc[pos]; NNFloat sdelta = pSrcDelta[pos]; NNFloat d = pDst[pos]; NNFloat delta = (s == d) ? sdelta : (NNFloat)0; if (beta == (NNFloat)0) pDstDelta[pos] = delta; else if (delta != (NNFloat)0.0) pDstDelta[pos] = beta * pDstDelta[pos] + delta; } } void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { unsigned long blocks = CalculateBlocks(size); kCalculateMaxoutDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pSrc, pSrcDelta, size, beta, pDst, pDstDelta); LAUNCHERROR("kCalculateMaxoutDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDP += blockIdx.x * stride; pA += blockIdx.x * stride; pB += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dp = *pDP; NNFloat dpDelta = *pDPDelta; NNFloat a = *pA; NNFloat b = *pB; NNFloat ab = a * b; NNFloat a2 = a * a; NNFloat b2 = b * b; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2)); NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2)); if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock); kCalculateCosineDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateCosineDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + 
threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dpDelta = *pDPDelta; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * bi; NNFloat delta = dpDelta * ai; if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock); kCalculateDotProductDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateDotProductDelta_kernel"); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. #define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool, NNFloat, NNFloat, NNFloat); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
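// ---------------------------------------------------------------------------
// Editorial note: every sparse delta kernel above follows the same CSR-style layout:
// one warp per example, with pSparseStart[d]/pSparseEnd[d] delimiting that example's
// nonzero column indices in pSparseIndex, and bSparseIgnoreZero selecting whether the
// implicit zero targets receive a "raw" delta or are simply cleared with cudaMemset.
// The single-threaded host reference below restates the sparse Sigmoid L1 case for
// clarity only; it ignores the shuffle-index indirection and the position offset, and
// the function name and signature are hypothetical rather than part of this file's API.
static void kReferenceSparseSigmoidL1OutputDelta(uint32_t batch, uint32_t stride,
    NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd,
    uint32_t* pSparseIndex, bool bSparseIgnoreZero)
{
    for (uint32_t d = 0; d < batch; d++)
    {
        // Implicit zero targets: delta = sgn(a - 0) * a * (1 - a), or 0 when ignored.
        for (uint32_t j = 0; j < stride; j++)
        {
            NNFloat a = pUnit[d * stride + j];
            NNFloat s = (a > (NNFloat)0.0) ? (NNFloat)1.0 : ((a < (NNFloat)0.0) ? (NNFloat)-1.0 : (NNFloat)0.0);
            pDelta[d * stride + j] = bSparseIgnoreZero ? (NNFloat)0.0 : s * a * ((NNFloat)1.0 - a);
        }
        // Explicit nonzero targets (t = 1): delta = sgn(a - 1) * a * (1 - a).
        for (uint64_t k = pSparseStart[d]; k < pSparseEnd[d]; k++)
        {
            NNFloat a = pUnit[d * stride + pSparseIndex[k]];
            NNFloat s = (a > (NNFloat)1.0) ? (NNFloat)1.0 : ((a < (NNFloat)1.0) ? (NNFloat)-1.0 : (NNFloat)0.0);
            pDelta[d * stride + pSparseIndex[k]] = s * a * ((NNFloat)1.0 - a);
        }
    }
}
// ---------------------------------------------------------------------------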
5d8e3c912756f055d805b61af53c822cb333d522.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/operators/minmax_ops.h" #include "caffe2/operators/utility_ops.h" #include "caffe2/utils/math.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/hip/execution_policy.h> #include <thrust/unique.h> namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float, float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half, at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); CAFFE_KNOWN_TYPE(const float*); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.size(); const float* data_ptr = X.data<float>(); scratch_.Resize(1); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); hipLaunchKernelGGL(( NanCheckKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), scratch_.mutable_data<bool>()); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(hipMemcpyAsync( &result, scratch_.raw_data(), 1, hipMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { Tensor cpu_X(CPU); cpu_X.ResizeLike(Input(j)); // Hack to cause allocaiton happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. 
cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j)); // sync copy } std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.size(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. // Copy over the data if we are not doing this in-place. if (&X != Y) { Y->CopyFrom(X, true /*async*/); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); __global__ void ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { maxout[i] = fmaxf(X[i], Y[i]); } } template <> bool MaxOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->template mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-maxes for (int i = 1; i < InputSize(); ++i) { hipLaunchKernelGGL(( ElwiseMaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>); __global__ void ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { minout[i] = fminf(X[i], Y[i]); } } template <> bool MinOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->template mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-mines for (int i = 1; i < InputSize(); ++i) { hipLaunchKernelGGL(( ElwiseMinKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>); template <typename T> __global__ void MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) { CUDA_1D_KERNEL_LOOP(i, N) { gi[i] = go[i] * (mx[i] == x[i]); } } template <> bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() { auto& output = Input(0); auto& grad_output = Input(1); const int kInputStartOffset = 2; const float* data = output.data<float>(); for (int i = 0; i < OutputSize(); i++) { auto& input = Input(i + kInputStartOffset); auto* grad_input = Output(i); grad_input->ResizeLike(input); hipLaunchKernelGGL(( MaxMinGradKernel), dim3(CAFFE_GET_BLOCKS(input.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), input.size(), output.data<float>(), input.data<float>(), grad_output.data<float>(), grad_input->template mutable_data<float>()); } return true; } /** * @brief Update slices of Y in-place with a batch of weighted X's. 
* Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const int64_t N, const int64_t B, const int64_t slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.size(), 0); CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.size(), 1); int64_t M = X0.size(); int64_t N = X0.dim(0); int64_t K = indices.size(); int64_t block_size = M / N; float* data = output->template mutable_data<float>(); // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. const int64_t B = (InputSize() - 3) / 2; x_data_host_.Resize(B); weights_host_.Resize(B); x_data_device_.Resize(B); weights_device_.Resize(B); const float** x_data_host = x_data_host_.mutable_data<const float*>(); const float** weights_host = weights_host_.mutable_data<const float*>(); const float** x_data_device = x_data_device_.mutable_data<const float*>(); const float** weights_device = weights_device_.mutable_data<const float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data()); } context_.Copy<const float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<const float*, CPUContext, CUDAContext>( B, weights_host, weights_device); hipLaunchKernelGGL(( AxpySliceKernel), dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), weight0.template data<float>(), K, B, block_size, weights_device, x_data_device, indices.template data<Index>(), data, M); return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { for (int64_t i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t 
block_size) { hipLaunchKernelGGL(( scatter_assign_kernel), dim3(::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), data, idxs, slicesData, N, K, block_size); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor* output) { int N = output->size(); hipLaunchKernelGGL(( RangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, output->template mutable_data<T>(), start, step); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
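// ---------------------------------------------------------------------------
// Editorial note: this file is the hipify output for the CUDA source listed next;
// apart from the cuda* -> hip* runtime renames, the systematic change is the kernel
// launch syntax. The toy kernel below (hypothetical name, not part of caffe2) is a
// minimal sketch showing the two launch forms side by side.
__global__ void editorial_add_one_kernel(int n, float* x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}
// CUDA form (see the .cu version of this file that follows):
//   editorial_add_one_kernel<<<blocks, threads, 0, stream>>>(n, x);
// HIP form emitted by hipify, matching the launches above:
//   hipLaunchKernelGGL((editorial_add_one_kernel), dim3(blocks), dim3(threads), 0, stream, n, x);
// ---------------------------------------------------------------------------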
5d8e3c912756f055d805b61af53c822cb333d522.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/flatten_op.h" #include "caffe2/operators/minmax_ops.h" #include "caffe2/operators/utility_ops.h" #include "caffe2/utils/math.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/system/cuda/execution_policy.h> #include <thrust/unique.h> namespace caffe2 { template <> bool WeightedSumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } template <> bool SumOp<CUDAContext>::RunOnDevice() { if (Input(0).IsType<float>()) { return DoRunWithType<float, float>(); } else if (Input(0).IsType<at::Half>()) { return DoRunWithType<at::Half, at::Half>(); } else { CAFFE_THROW("Unsupported inputs"); } return false; } REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>); REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>); REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>); REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>); CAFFE_KNOWN_TYPE(const float*); REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>); __global__ void NanCheckKernel(int N, const float* X, bool* result) { bool has_nan = false; CUDA_1D_KERNEL_LOOP(i, N) { // Note: we have no need to do early return, since only if this fails // will we not need to inspect all elements. No need to optimize the // case that will fail. has_nan = has_nan || isnan(X[i]) || isinf(X[i]); } __syncthreads(); if (has_nan) { result[0] = true; } } template <> bool NanCheckOp<CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); const size_t N = X.size(); const float* data_ptr = X.data<float>(); scratch_.Resize(1); math::Set<bool, CUDAContext>( 1, false, scratch_.mutable_data<bool>(), &context_); NanCheckKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), scratch_.mutable_data<bool>()); bool result = false; { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CUDA_ENFORCE(cudaMemcpyAsync( &result, scratch_.raw_data(), 1, cudaMemcpyDefault, context_.cuda_stream())); } // Note: we must synchronize here so we can inspect the result context_.FinishDeviceComputation(); // Print out diagnostic info if we have a NaN or inf if (result) { std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0) << std::endl; for (int j = 0; j < InputSize(); j++) { Tensor cpu_X(CPU); cpu_X.ResizeLike(Input(j)); // Hack to cause allocaiton happen here, so it won't happen // when we do CopyFrom. We need the mutex then because host->gpu // copies seem to possibly lock with NCCL. cpu_X.mutable_data<float>(); { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); cpu_X.CopyFrom(Input(j)); // sync copy } std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j) << "]" << std::endl; tensorPrinter_.Print<float>(cpu_X); if (j == 0) { std::cerr << "NaN idxs:" << std::endl; auto* cpu_X_data = cpu_X.data<float>(); for (size_t i = 0; i < cpu_X.size(); ++i) { if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) { std::cerr << i << " "; } } } std::cerr << std::endl; } return false; } // This op should act as an identity matrix if we don't find any NaNs/infs. 
// Copy over the data if we are not doing this in-place. if (&X != Y) { Y->CopyFrom(X, true /*async*/); } return true; } REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>); __global__ void ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { maxout[i] = fmaxf(X[i], Y[i]); } } template <> bool MaxOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->template mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-maxes for (int i = 1; i < InputSize(); ++i) { ElwiseMaxKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>); __global__ void ElwiseMinKernel(const float* X, const float* Y, float* minout, const int N) { CUDA_1D_KERNEL_LOOP(i, N) { minout[i] = fminf(X[i], Y[i]); } } template <> bool MinOp<float, CUDAContext>::Compute() { float* output_data = Output(0)->template mutable_data<float>(); const int N = Input(0).size(); // Run pairwise-mines for (int i = 1; i < InputSize(); ++i) { ElwiseMinKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( (i == 0 ? Input(0).data<float>() : Output(0)->data<float>()), Input(i).data<float>(), output_data, N); } return true; } REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>); template <typename T> __global__ void MaxMinGradKernel(int N, const T* mx, const T* x, const T* go, T* gi) { CUDA_1D_KERNEL_LOOP(i, N) { gi[i] = go[i] * (mx[i] == x[i]); } } template <> bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() { auto& output = Input(0); auto& grad_output = Input(1); const int kInputStartOffset = 2; const float* data = output.data<float>(); for (int i = 0; i < OutputSize(); i++) { auto& input = Input(i + kInputStartOffset); auto* grad_input = Output(i); grad_input->ResizeLike(input); MaxMinGradKernel<<< CAFFE_GET_BLOCKS(input.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( input.size(), output.data<float>(), input.data<float>(), grad_output.data<float>(), grad_input->template mutable_data<float>()); } return true; } /** * @brief Update slices of Y in-place with a batch of weighted X's. 
* Y[idx] = alpha[b] * X[b][i] + Y[idx] * i=0,...,N-1 * b=0,...,B-1 * idx=Indices[i] */ template <typename T_INDEX> __global__ void AxpySliceKernel( const float* weight0, const int64_t N, const int64_t B, const int64_t slice_size, const float** alpha, const float** X, const T_INDEX* Indices, float* Y, const int64_t M) { // This implementation requires that the first weight is 1.0 CUDA_KERNEL_ASSERT(weight0[0] == 1.0); for (int i = blockIdx.x; i < N; i += gridDim.x) { T_INDEX idx = Indices[i]; float* y_offset = Y + (idx * slice_size); for (int b = 0; b < B; b++) { float a = *alpha[b]; const float* x_offset = X[b] + (i * slice_size); for (int j = threadIdx.x; j < slice_size; j += blockDim.x) { atomicAdd(&y_offset[j], a * x_offset[j]); } } } } template <> bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } template <> template <typename Index> bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() { CAFFE_ENFORCE_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required"); CAFFE_ENFORCE_GT(X0.size(), 0); CAFFE_ENFORCE_GT(X0.ndim(), 0, "X0 has to be at least the vector"); CAFFE_ENFORCE_EQ(weight0.size(), 1); int64_t M = X0.size(); int64_t N = X0.dim(0); int64_t K = indices.size(); int64_t block_size = M / N; float* data = output->template mutable_data<float>(); // In order to have all device pointers of x_i (and weight_i similarly) // consecutively in device memory, copy pointers to a host vector and then // copy back into a device array. const int64_t B = (InputSize() - 3) / 2; x_data_host_.Resize(B); weights_host_.Resize(B); x_data_device_.Resize(B); weights_device_.Resize(B); const float** x_data_host = x_data_host_.mutable_data<const float*>(); const float** weights_host = weights_host_.mutable_data<const float*>(); const float** x_data_device = x_data_device_.mutable_data<const float*>(); const float** weights_device = weights_device_.mutable_data<const float*>(); for (int inp = 3; inp < InputSize(); inp += 2) { int idx = (inp - 3) / 2; x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data()); weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data()); } context_.Copy<const float*, CPUContext, CUDAContext>( B, x_data_host, x_data_device); context_.Copy<const float*, CPUContext, CUDAContext>( B, weights_host, weights_device); AxpySliceKernel<<< std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( weight0.template data<float>(), K, B, block_size, weights_device, x_data_device, indices.template data<Index>(), data, M); return true; } REGISTER_CUDA_OPERATOR( ScatterWeightedSum, ScatterWeightedSumOp<float, CUDAContext>); namespace { template <typename Index, typename T> __global__ void scatter_assign_kernel( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { for (int64_t i = blockIdx.x; i < K; i += gridDim.x) { Index idx = idxs[i]; CUDA_KERNEL_ASSERT(0 <= idx && idx < N); const T* src = slicesData + block_size * i; T* dest = data + block_size * idx; for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) { dest[j] = src[j]; } } } } // namespace template <> template <typename Index, typename T> void ScatterAssignOp<CUDAContext>::DoScatterAssign( T* data, const Index* idxs, const T* slicesData, int64_t N, int64_t K, int64_t block_size) { 
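// One block per slice index (grid-stride loop over K, capped at CAFFE_MAXIMUM_NUM_BLOCKS);
// the threads of each block cooperatively copy that slice's block_size elements.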
scatter_assign_kernel<<< std::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(data, idxs, slicesData, N, K, block_size); } REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>); REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>); template <typename T> __global__ void RangeKernel(const int n, T* Y, T offset, T step) { CUDA_1D_KERNEL_LOOP(index, n) { Y[index] = index * step + offset; } } template <> template <typename T> bool RangeOp<CUDAContext>::DoRunOnDevice( const T& start, const T& step, Tensor* output) { int N = output->size(); RangeKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, output->template mutable_data<T>(), start, step); return true; } REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>); } // namespace caffe2
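The doc comment on AxpySliceKernel above compresses a three-level indexing scheme (slice index, input pair, element) into one line. As a reading aid only, here is a CPU sketch of the same update; the function name and std::vector layout are illustrative assumptions, not part of the operator. Every selected slice Y[Indices[i]] accumulates alpha[b] * X[b][i] for each of the B weighted inputs, with the first weight required to be 1.0 and already reflected in Y.

// Hypothetical CPU reference for the slice update AxpySliceKernel performs;
// container types and names here are illustrative, not from the source.
#include <cstdint>
#include <vector>

void scatter_weighted_sum_ref(
    std::vector<float>& Y,                     // M slices of slice_size floats
    const std::vector<int64_t>& indices,       // N indices into Y's slices
    const std::vector<std::vector<float>>& X,  // B inputs, each N * slice_size
    const std::vector<float>& alpha,           // B scalar weights
    int64_t slice_size) {
  for (size_t i = 0; i < indices.size(); ++i) {
    float* y = Y.data() + indices[i] * slice_size;      // Y[idx]
    for (size_t b = 0; b < X.size(); ++b) {
      const float* x = X[b].data() + i * slice_size;    // X[b][i]
      for (int64_t j = 0; j < slice_size; ++j) {
        y[j] += alpha[b] * x[j];  // GPU kernel uses atomicAdd for duplicate idx
      }
    }
  }
}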
66c8053880a6fac8ea9a5f53af3672386abf5ddd.hip
// !!! This is a file automatically generated by hipify!!! /* by Qin Yu, Apr 2019 */ #include <fstream> using namespace std; #include <hip/hip_runtime.h> #include <hip/hip_cooperative_groups.h> namespace cg = cooperative_groups; #define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) // Idiom, not used, put here for convenient debugging. __global__ void kernel_minibatch(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int can_stop = 0; float delta_ = 0; __shared__ float delta; float last_alpha_j, last_alpha; int counter = 0; while (true) { counter++; last_alpha_j = alpha[j]; for (int i = 0; i < l; i++) { if (j == i) { last_alpha = alpha[i]; delta = 1 / K[i * l + i] * (1 - y[i] * sigma[i]); alpha[i] += delta; if (alpha[i] < 0) { alpha[i] = 0; delta = 0 - last_alpha; } if (alpha[i] > C) { alpha[i] = C; delta = C - last_alpha; } } __syncthreads(); sigma[j] += delta * y[i] * K[i * l + j]; } can_stop = 0; delta_ = alpha[j] - last_alpha_j; if (-0.0001f < delta_ && delta_ < 0.0001f) can_stop = 1; // CUPRINTF("%d, %9.6f, %9.6f, %9.6f, %d\n", counter, alpha[j], // last_alpha_j, delta_, can_stop); if (__syncthreads_and(can_stop) > 0) { if (j == 1) { // CUPRINTF("iters = %d\n", counter); iters[0] = counter; } break; } } } extern "C" __global__ void kernel_minibatch_g(int *iters, float *alpha, float *sigma, float *K, int *y, int *d, int ddim, float *delta, int l, int C) { int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < l) { cg::grid_group grid = cg::this_grid(); // if (j == l-1) CUPRINTF("l = %d, C = %d\n", l, C); int can_break = 0; int can_stop = 0; float delta_ = 0; float last_alpha_j, last_alpha; int counter = 0; while (true) { // for (int counter = 0; counter < 2000; counter++) { counter++; if (threadIdx.x == 1) d[blockIdx.x] = 0; last_alpha_j = alpha[j]; for (uint32_t i = 0; i < l; i++) { if (j == i) { // if (threadIdx.x == i) { // This was a big big bug last_alpha = alpha[i]; delta[0] = 1 / K[i * l + i] * (1 - y[i] * sigma[i]); alpha[i] += delta[0]; // alpha[i] += delta; if (alpha[i] < 0) { alpha[i] = 0; delta[0] = 0 - last_alpha; } if (alpha[i] > C) { alpha[i] = C; delta[0] = C - last_alpha; } } cg::sync(grid); sigma[j] += delta[0] * y[i] * K[i * l + j]; } can_stop = 0; delta_ = alpha[j] - last_alpha_j; if (-0.0001f < delta_ && delta_ < 0.0001f) can_stop = 1; if (__syncthreads_and(can_stop) > 0) if (threadIdx.x == 1) d[blockIdx.x] = 1; cg::sync(grid); can_break = 0; for (int i = 0; i < ddim; i++) { can_break += d[i]; } // if (j == 1) CUPRINTF("iters = %d\n", counter); if (can_break == ddim) { if (j == 1) { // CUPRINTF("iters = %d\n", counter); iters[0] = counter; } // cg::sync(grid); break; } } } } // Helper function for using CUDA to update sigma in parallel: hipError_t kernel_minibatch_wrapper(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int *dev_iters = 0; float *dev_alpha = 0; float *dev_sigma = 0; float *dev_K = 0; int *dev_y = 0; int *dev_block_done = 0; float *dev_delta = 0; const int block_dim_max = 1024; int block_dimension = block_dim_max; int grid_dimension = (l - 1) / block_dim_max + 1; dim3 block(block_dimension); dim3 grid(grid_dimension); void *args[10] = { &dev_iters, &dev_alpha, &dev_sigma, &dev_K, &dev_y, &dev_block_done, &grid_dimension, &dev_delta, &l, &C}; hipError_t cudaStatus; // Allocate GPU buffers for all vectors: cudaStatus = hipMalloc(&dev_iters, 
sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error1; } cudaStatus = hipMalloc(&dev_alpha, l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error2; } cudaStatus = hipMalloc(&dev_sigma, l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error3; } cudaStatus = hipMalloc(&dev_K, l * l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error4; } cudaStatus = hipMalloc(&dev_y, l * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error5; } cudaStatus = hipMalloc(&dev_block_done, grid_dimension * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error5; } cudaStatus = hipMalloc(&dev_delta, 1 * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error5; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_K, K, l * l * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(dev_y, y, l * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } // printf("READY TO CALL KERNEL\n"); cudaStatus = hipLaunchCooperativeKernel((void *)kernel_minibatch_g, grid, block, args, sizeof(float), hipStream_t(0)); if (cudaStatus != hipSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error5; } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error5; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching " "addKernel!\n", cudaStatus); goto Error5; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(iters, dev_iters, sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(alpha, dev_alpha, l * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(sigma, dev_sigma, l * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } // Error6: hipFree(dev_block_done); hipFree(dev_delta); Error5: hipFree(dev_y); Error4: hipFree(dev_K); Error3: hipFree(dev_sigma); Error2: hipFree(dev_alpha); Error1: hipFree(dev_iters); // Error0: return cudaStatus; } hipError_t kernel_minibatch_block_wrapper(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int *dev_iters = 0; float *dev_alpha = 0; float *dev_sigma = 0; float *dev_K = 0; int *dev_y = 0; dim3 grid(1); dim3 block(l); void *args[7] = {&dev_iters, &dev_alpha, &dev_sigma, &dev_K, &dev_y, &l, &C}; hipError_t cudaStatus; // Allocate GPU buffers for all vectors: cudaStatus = hipMalloc(&dev_iters, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error1; } cudaStatus = hipMalloc(&dev_alpha, l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error2; } cudaStatus = hipMalloc(&dev_sigma, l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error3; } cudaStatus = hipMalloc(&dev_K, l * l * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error4; } cudaStatus = hipMalloc(&dev_y, l * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error5; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_K, K, l * l * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(dev_y, y, l * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } // printf("READY TO CALL KERNEL\n"); cudaStatus = hipLaunchKernel((void *)kernel_minibatch, grid, block, args, sizeof(float), hipStream_t(0)); if (cudaStatus != hipSuccess) { fprintf(stderr, "kernel_minibatch launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error5; } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error5; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching " "addKernel!\n", cudaStatus); goto Error5; } // Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(iters, dev_iters, sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(alpha, dev_alpha, l * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } cudaStatus = hipMemcpy(sigma, dev_sigma, l * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error5; } Error5: hipFree(dev_y); Error4: hipFree(dev_K); Error3: hipFree(dev_sigma); Error2: hipFree(dev_alpha); Error1: hipFree(dev_iters); // Error0: return cudaStatus; }
66c8053880a6fac8ea9a5f53af3672386abf5ddd.cu
/* by Qin Yu, Apr 2019 */ #include <fstream> using namespace std; #include <cuda.h> #include <cooperative_groups.h> namespace cg = cooperative_groups; #define CUPRINTF(fmt, ...) printf("[%d, %d]:\t" fmt, \ blockIdx.y*gridDim.x+blockIdx.x,\ threadIdx.z*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x,\ __VA_ARGS__) // Idiom, not used, put here for convenient debugging. __global__ void kernel_minibatch(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int j = blockIdx.x * blockDim.x + threadIdx.x; int can_stop = 0; float delta_ = 0; __shared__ float delta; float last_alpha_j, last_alpha; int counter = 0; while (true) { counter++; last_alpha_j = alpha[j]; for (int i = 0; i < l; i++) { if (j == i) { last_alpha = alpha[i]; delta = 1 / K[i * l + i] * (1 - y[i] * sigma[i]); alpha[i] += delta; if (alpha[i] < 0) { alpha[i] = 0; delta = 0 - last_alpha; } if (alpha[i] > C) { alpha[i] = C; delta = C - last_alpha; } } __syncthreads(); sigma[j] += delta * y[i] * K[i * l + j]; } can_stop = 0; delta_ = alpha[j] - last_alpha_j; if (-0.0001f < delta_ && delta_ < 0.0001f) can_stop = 1; // CUPRINTF("%d, %9.6f, %9.6f, %9.6f, %d\n", counter, alpha[j], // last_alpha_j, delta_, can_stop); if (__syncthreads_and(can_stop) > 0) { if (j == 1) { // CUPRINTF("iters = %d\n", counter); iters[0] = counter; } break; } } } extern "C" __global__ void kernel_minibatch_g(int *iters, float *alpha, float *sigma, float *K, int *y, int *d, int ddim, float *delta, int l, int C) { int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < l) { cg::grid_group grid = cg::this_grid(); // if (j == l-1) CUPRINTF("l = %d, C = %d\n", l, C); int can_break = 0; int can_stop = 0; float delta_ = 0; float last_alpha_j, last_alpha; int counter = 0; while (true) { // for (int counter = 0; counter < 2000; counter++) { counter++; if (threadIdx.x == 1) d[blockIdx.x] = 0; last_alpha_j = alpha[j]; for (uint32_t i = 0; i < l; i++) { if (j == i) { // if (threadIdx.x == i) { // This was a big big bug last_alpha = alpha[i]; delta[0] = 1 / K[i * l + i] * (1 - y[i] * sigma[i]); alpha[i] += delta[0]; // alpha[i] += delta; if (alpha[i] < 0) { alpha[i] = 0; delta[0] = 0 - last_alpha; } if (alpha[i] > C) { alpha[i] = C; delta[0] = C - last_alpha; } } cg::sync(grid); sigma[j] += delta[0] * y[i] * K[i * l + j]; } can_stop = 0; delta_ = alpha[j] - last_alpha_j; if (-0.0001f < delta_ && delta_ < 0.0001f) can_stop = 1; if (__syncthreads_and(can_stop) > 0) if (threadIdx.x == 1) d[blockIdx.x] = 1; cg::sync(grid); can_break = 0; for (int i = 0; i < ddim; i++) { can_break += d[i]; } // if (j == 1) CUPRINTF("iters = %d\n", counter); if (can_break == ddim) { if (j == 1) { // CUPRINTF("iters = %d\n", counter); iters[0] = counter; } // cg::sync(grid); break; } } } } // Helper function for using CUDA to update sigma in parallel: cudaError_t kernel_minibatch_wrapper(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int *dev_iters = 0; float *dev_alpha = 0; float *dev_sigma = 0; float *dev_K = 0; int *dev_y = 0; int *dev_block_done = 0; float *dev_delta = 0; const int block_dim_max = 1024; int block_dimension = block_dim_max; int grid_dimension = (l - 1) / block_dim_max + 1; dim3 block(block_dimension); dim3 grid(grid_dimension); void *args[10] = { &dev_iters, &dev_alpha, &dev_sigma, &dev_K, &dev_y, &dev_block_done, &grid_dimension, &dev_delta, &l, &C}; cudaError_t cudaStatus; // Allocate GPU buffers for all vectors: cudaStatus = cudaMalloc(&dev_iters, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc 
failed!"); goto Error1; } cudaStatus = cudaMalloc(&dev_alpha, l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error2; } cudaStatus = cudaMalloc(&dev_sigma, l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error3; } cudaStatus = cudaMalloc(&dev_K, l * l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error4; } cudaStatus = cudaMalloc(&dev_y, l * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error5; } cudaStatus = cudaMalloc(&dev_block_done, grid_dimension * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error5; } cudaStatus = cudaMalloc(&dev_delta, 1 * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error5; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_K, K, l * l * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(dev_y, y, l * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } // printf("READY TO CALL KERNEL\n"); cudaStatus = cudaLaunchCooperativeKernel((void *)kernel_minibatch_g, grid, block, args, sizeof(float), cudaStream_t(0)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error5; } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error5; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching " "addKernel!\n", cudaStatus); goto Error5; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(iters, dev_iters, sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(alpha, dev_alpha, l * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(sigma, dev_sigma, l * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } // Error6: cudaFree(dev_block_done); cudaFree(dev_delta); Error5: cudaFree(dev_y); Error4: cudaFree(dev_K); Error3: cudaFree(dev_sigma); Error2: cudaFree(dev_alpha); Error1: cudaFree(dev_iters); // Error0: return cudaStatus; } cudaError_t kernel_minibatch_block_wrapper(int *iters, float *alpha, float *sigma, float *K, int *y, int l, int C) { int *dev_iters = 0; float *dev_alpha = 0; float *dev_sigma = 0; float *dev_K = 0; int *dev_y = 0; dim3 grid(1); dim3 block(l); void *args[7] = {&dev_iters, &dev_alpha, &dev_sigma, &dev_K, &dev_y, &l, &C}; cudaError_t cudaStatus; // Allocate GPU buffers for all vectors: cudaStatus = cudaMalloc(&dev_iters, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error1; } cudaStatus = cudaMalloc(&dev_alpha, l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error2; } cudaStatus = cudaMalloc(&dev_sigma, l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error3; } cudaStatus = cudaMalloc(&dev_K, l * l * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error4; } cudaStatus = cudaMalloc(&dev_y, l * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error5; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_K, K, l * l * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(dev_y, y, l * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } // printf("READY TO CALL KERNEL\n"); cudaStatus = cudaLaunchKernel((void *)kernel_minibatch, grid, block, args, sizeof(float), cudaStream_t(0)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "kernel_minibatch launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error5; } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "kernel_minibatch_g launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error5; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching " "addKernel!\n", cudaStatus); goto Error5; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(iters, dev_iters, sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(alpha, dev_alpha, l * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } cudaStatus = cudaMemcpy(sigma, dev_sigma, l * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error5; } Error5: cudaFree(dev_y); Error4: cudaFree(dev_K); Error3: cudaFree(dev_sigma); Error2: cudaFree(dev_alpha); Error1: cudaFree(dev_iters); // Error0: return cudaStatus; }
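Both wrappers above run kernel_minibatch_g through a cooperative launch so that cg::sync(grid) acts as a grid-wide barrier. A detail the wrappers leave implicit: that barrier only works when the device supports cooperative launch and every block of the requested grid can be resident at once. The helper below is a minimal sketch of that check; the helper name and its use are assumptions, not part of the file. A caller could invoke it with (const void *)kernel_minibatch_g, grid_dimension, block_dimension and sizeof(float) before launching.

// Sketch only: verify cooperative-launch support and that the whole grid can
// be co-resident; otherwise cg::sync(grid) in kernel_minibatch_g cannot
// complete.
#include <cuda_runtime.h>

bool cooperative_launch_fits(const void *kernel, int device,
                             int grid_blocks, int block_threads,
                             size_t dynamic_smem) {
  int supported = 0;
  cudaDeviceGetAttribute(&supported, cudaDevAttrCooperativeLaunch, device);
  if (!supported) return false;                 // device cannot sync a grid

  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, device);

  int blocks_per_sm = 0;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &blocks_per_sm, kernel, block_threads, dynamic_smem);
  return grid_blocks <= blocks_per_sm * prop.multiProcessorCount;
}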
3f2b5263edbdeb5b7c7bb1d4dc7393e0c78bf1a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <stdexcept> // Intentionally not including header since it is meant for gcc consumption. // #include "cudaWrappers.h" #include "cudaCommon.hu" #include "cudaFolds.hu" #include "cudaGmm.hu" #include "cudaMVNormal.hu" #include "gmm.h" void gpuSum(size_t numPoints, const size_t pointDim, float* host_a, float* host_sum) { assert(numPoints > 0); assert(pointDim > 0); assert(host_a != NULL); assert(host_sum != NULL); int deviceId; check(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProp; check(hipGetDeviceProperties(&deviceProp, deviceId)); // cudaArraySum is meant for powers of two size_t M = largestPowTwoLessThanEq(numPoints); float cpuSum[pointDim]; memset(cpuSum, 0, pointDim * sizeof(float)); for(size_t i = M; i < numPoints; ++i) { for(size_t j = 0; j < pointDim; ++j) { cpuSum[j] += host_a[i * pointDim + j]; } } numPoints = M; float *device_a = sendToGpu(numPoints * pointDim, host_a); // cudaArraySum is synchronous cudaArraySum( &deviceProp, numPoints, pointDim, device_a ); check(hipMemcpy(host_sum, device_a, pointDim * sizeof(float), hipMemcpyDeviceToHost)); hipFree(device_a); for(size_t i = 0; i < pointDim; ++i) { host_sum[i] += cpuSum[i]; } } float gpuMax(size_t N, float* host_a) { assert(host_a != NULL); assert(N > 0); int deviceId; check(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProp; check(hipGetDeviceProperties(&deviceProp, deviceId)); float *device_a = sendToGpu(N, host_a); cudaArrayMax( &deviceProp, N, device_a ); float gpuMax = 0; check(hipMemcpy(&gpuMax, device_a, sizeof(float), hipMemcpyDeviceToHost)); hipFree(device_a); return gpuMax; } void gpuLogMVNormDist( const size_t numPoints, const size_t pointDim, const float* X, const float* mu, const float* sigmaL, float* logP ) { int deviceId; check(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProp; check(hipGetDeviceProperties(&deviceProp, deviceId)); float* device_X = sendToGpu(numPoints * pointDim, X); float* device_mu = sendToGpu(pointDim, mu); float* device_sigmaL = sendToGpu(pointDim * pointDim, sigmaL); float* device_logP = mallocOnGpu(numPoints); dim3 grid, block; calcDim(numPoints, &deviceProp, &block, &grid); hipLaunchKernelGGL(( kernLogMVNormDist), dim3(grid), dim3(block), 0, 0, numPoints, pointDim, device_X, device_mu, device_sigmaL, device_logP ); check(hipMemcpy(logP, device_logP, numPoints * sizeof(float), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); hipFree(device_X); hipFree(device_mu); hipFree(device_sigmaL); hipFree(device_logP); } float gpuGmmLogLikelihood( const size_t numPoints, const size_t numComponents, const float* logpi, float* logP ) { int deviceId; check(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProp; check(hipGetDeviceProperties(&deviceProp, deviceId)); float* device_logpi = sendToGpu(numComponents, logpi); // Sending all data because logP is an array organized by: // [ <- numPoints -> ]_0 [ <- numPoints -> ]_... [ <- numPoints -> ]_{k-1} // So even though we are only using M of those points on the GPU, // we need all numPoints to ensure indexing by numPoints * k + i works // correctly to access prob(x_i|mu_k,Sigma_k). 
float* device_logP = sendToGpu(numComponents * numPoints, logP); float logL = cudaGmmLogLikelihoodAndGammaNK( & deviceProp, numPoints, numComponents, logpi, logP, device_logpi, device_logP ); hipFree(device_logpi); hipFree(device_logP); return logL; } void gpuCalcLogGammaNK( const size_t numPoints, const size_t numComponents, const float* logpi, float* loggamma ) { gpuGmmLogLikelihood( numPoints, numComponents, logpi, loggamma ); } void gpuCalcLogGammaK( const size_t numPoints, const size_t numComponents, const float* loggamma, float* logGamma ) { // Gamma[k] = max + log sum expf(loggamma - max) float* working = (float*)malloc(numPoints * sizeof(float)); for(size_t k = 0; k < numComponents; ++k) { // TODO: refactor to have a generic z = a + log sum expf(x - a) memcpy(working, & loggamma[k * numPoints], numPoints * sizeof(float)); float maxValue = gpuMax(numPoints, working); memcpy(working, & loggamma[k * numPoints], numPoints * sizeof(float)); for(size_t i = 0; i < numPoints; ++i) { working[i] = expf(working[i] - maxValue); } float sum = 0; gpuSum(numPoints, 1, working, & sum); logGamma[k] = maxValue + logf(sum ); } free(working); } void gpuGmmFit( const float* X, const size_t numPoints, const size_t pointDim, const size_t numComponents, float* pi, float* Mu, float* Sigma, float* SigmaL, float* normalizers, const size_t maxIterations, GMM* gmm ) { assert(X != NULL); assert(numPoints > 0); assert(pointDim > 0 && pointDim <= 1024); assert(numComponents > 0 && numComponents <= 1024); assert(pi != NULL); assert(Mu != NULL); assert(Sigma != NULL); assert(SigmaL != NULL); assert(normalizers != NULL); assert(maxIterations >= 1); int deviceId; check(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProp; check(hipGetDeviceProperties(&deviceProp, deviceId)); // printf("name: %s\n", deviceProp.name); // printf("multiProcessorCount: %d\n", deviceProp.multiProcessorCount); // printf("concurrentKernels: %d\n", deviceProp.concurrentKernels); float* device_X = pinHostAndSendDevice(numPoints * pointDim, (float*) X); for(size_t i = 0; i < numComponents; ++i) { assert(pi[i] > 0); pi[i] = logf(pi[i]); } float* device_logpi = pinHostAndSendDevice(numComponents, pi); float* device_Mu = pinHostAndSendDevice(numComponents * pointDim, Mu); float* device_Sigma = pinHostAndSendDevice(numComponents * pointDim * pointDim, Sigma); float* device_SigmaL = pinHostAndSendDevice(numComponents * pointDim * pointDim, SigmaL); float* device_normalizers = pinHostAndSendDevice(numComponents, normalizers); int error = 0; int* device_error = (int*) pinHostAndSendDevice(1, (float*) &error); float* device_loggamma = mallocOnGpu(numPoints * numComponents); float* device_logGamma = mallocOnGpu(numPoints * numComponents); float previousLogL = -INFINITY; float* pinnedCurrentLogL; hipHostMalloc(&pinnedCurrentLogL, sizeof(float)); *pinnedCurrentLogL = -INFINITY; // logPx, mu, sigma reductions // This means for mu and sigma can only do one component at a time otherwise // the memory foot print will limit how much data we can actually work with. 
float* device_working = mallocOnGpu(numComponents * numPoints * pointDim * pointDim); dim3 grid, block; calcDim(numPoints, &deviceProp, &block, &grid); size_t iteration = 0; const float tolerance = 1e-8; hipStream_t streams[numComponents]; for(size_t k = 0; k < numComponents; ++k) { hipStreamCreate(&streams[k]); } hipEvent_t kernelEvent[numComponents + 1]; for(size_t k = 0; k <= numComponents; ++k) { hipEventCreateWithFlags(& kernelEvent[k], hipEventDisableTiming); } try { do { // -------------------------------------------------------------------------- // E-Step // -------------------------------------------------------------------------- // loggamma[k * numPoints + i] = p(x_i | mu_k, Sigma_k ) for(size_t k = 0; k < numComponents; ++k) { // Fill in numPoint many probabilities hipLaunchKernelGGL(( kernLogMVNormDist), dim3(grid), dim3(block), 0, streams[k], numPoints, pointDim, device_X, & device_Mu[k * pointDim], & device_SigmaL[k * pointDim * pointDim], & device_loggamma[k * numPoints] ); hipEventRecord(kernelEvent[k], streams[k]); } for(size_t k = 0; k < numComponents; ++k) { // streams[numComponents - 1] needs to wait for everyone else to finish hipStreamWaitEvent(streams[numComponents-1], kernelEvent[k], 0); } // loggamma[k * numPoints + i] = p(x_i | mu_k, Sigma_k) / p(x_i) // working[i] = p(x_i) hipLaunchKernelGGL(( kernCalcLogLikelihoodAndGammaNK), dim3(grid), dim3(block), 0, streams[numComponents - 1], numPoints, numComponents, device_logpi, device_working, device_loggamma ); // working[0] = sum_{i} p(x_i) cudaArraySum(&deviceProp, numPoints, 1, device_working, streams[numComponents - 1]); previousLogL = *pinnedCurrentLogL; check(hipMemcpyAsync( pinnedCurrentLogL, device_working, sizeof(float), hipMemcpyDeviceToHost, streams[numComponents - 1] )); for(size_t k = 0; k < numComponents; ++k) { // synchronize everybody with the host hipStreamSynchronize(streams[k]); } if(fabsf(*pinnedCurrentLogL - previousLogL) < tolerance || *pinnedCurrentLogL < previousLogL) { break; } // -------------------------------------------------------------------------- // M-Step // -------------------------------------------------------------------------- for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; cudaLogSumExp( & deviceProp, grid, block, numPoints, & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[i * pointDim + j] = gamma_ik / Gamma K * x_j hipLaunchKernelGGL(( kernCalcMu), dim3(grid), dim3(block), 0, streams[k], numPoints, pointDim, device_X, & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[0 + j] = sum gamma_ik / Gamma K * x_j cudaArraySum( &deviceProp, numPoints, pointDim, device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(hipMemcpyAsync( & device_Mu[k * pointDim], device_workingK, pointDim * sizeof(float), hipMemcpyDeviceToDevice, streams[k] )); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(hipMemcpyAsync( & device_Sigma[k * pointDim * pointDim], device_workingK, pointDim * 
pointDim * sizeof(float), hipMemcpyDeviceToDevice, streams[k] )); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; hipLaunchKernelGGL(( kernCalcSigma), dim3(grid), dim3(block), 0, streams[k], numPoints, pointDim, device_X, & device_Mu[k * pointDim], & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[0 + j] = sum gamma_ik / Gamma K * [...]_j cudaArraySum( &deviceProp, numPoints, pointDim * pointDim, device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(hipMemcpyAsync( & device_Sigma[k * pointDim * pointDim], device_workingK, pointDim * pointDim * sizeof(float), hipMemcpyDeviceToDevice, streams[k] )); hipEventRecord(kernelEvent[k], streams[k]); } for(size_t k = 0; k < numComponents; ++k) { // streams[numComponents - 1] needs to wait for everyone else to finish hipStreamWaitEvent(streams[numComponents-1], kernelEvent[k], 0); } // pi_k^(t+1) = pi_k Gamma_k / sum_{i}^{K} pi_i * Gamma_i // Use thread sync to compute denom to avoid data race hipLaunchKernelGGL(( kernUpdatePi), dim3(1), dim3(numComponents), 0, streams[numComponents - 1], numPoints, numComponents, device_logpi, device_logGamma ); // recompute sigmaL and normalizer hipLaunchKernelGGL(( kernPrepareCovariances), dim3(1), dim3(numComponents), 0, streams[numComponents - 1], numComponents, pointDim, device_Sigma, device_SigmaL, device_normalizers, device_error ); hipEventRecord(kernelEvent[numComponents], streams[numComponents - 1]); for(size_t k = 0; k < numComponents; ++k) { // Everyone needs to wait for the work on streams[numComponents - 1] to finish. hipStreamWaitEvent(streams[k], kernelEvent[numComponents], 0); } // check error to see if inverse failed check(hipMemcpy(&error, device_error, sizeof(int), hipMemcpyDeviceToHost)); if ( error ) { throw std::runtime_error("Failed to compute inverse"); } } while(++iteration < maxIterations); // copy loggamma to host to compute output labels float* loggamma = (float *)malloc(numPoints * numComponents * sizeof(float)); check(hipMemcpy( loggamma, device_loggamma, numPoints * numComponents * sizeof(float), hipMemcpyDeviceToHost )); gmm->failed = false; gmm->y_pred = calcLabels(loggamma, numPoints, numComponents); gmm->logL = *pinnedCurrentLogL; } catch ( std::runtime_error& e ) { fprintf(stderr, "warning: model failed\n"); gmm->failed = true; } for(size_t k = 0; k <= numComponents; ++k) { hipEventDestroy(kernelEvent[k]); } for(size_t k = 0; k < numComponents; ++k) { hipStreamDestroy(streams[k]); } hipHostFree(pinnedCurrentLogL); hipFree(device_working); hipFree(device_logGamma); hipFree(device_loggamma); unpinHost(device_error, &error); recvDeviceUnpinHost(device_normalizers, normalizers, numComponents); recvDeviceUnpinHost(device_SigmaL, SigmaL, numComponents * pointDim * pointDim); recvDeviceUnpinHost(device_Sigma, Sigma, numComponents * pointDim * pointDim); recvDeviceUnpinHost(device_Mu, Mu, numComponents * pointDim); recvDeviceUnpinHost(device_logpi, pi, numComponents); for(size_t i = 0; i < numComponents; ++i) { pi[i] = expf(pi[i]); } unpinHost(device_X, (float*) X); }
3f2b5263edbdeb5b7c7bb1d4dc7393e0c78bf1a9.cu
#include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <stdexcept> // Intentionally not including header since it is meant for gcc consumption. // #include "cudaWrappers.h" #include "cudaCommon.hu" #include "cudaFolds.hu" #include "cudaGmm.hu" #include "cudaMVNormal.hu" #include "gmm.h" void gpuSum(size_t numPoints, const size_t pointDim, float* host_a, float* host_sum) { assert(numPoints > 0); assert(pointDim > 0); assert(host_a != NULL); assert(host_sum != NULL); int deviceId; check(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProp; check(cudaGetDeviceProperties(&deviceProp, deviceId)); // cudaArraySum is meant for powers of two size_t M = largestPowTwoLessThanEq(numPoints); float cpuSum[pointDim]; memset(cpuSum, 0, pointDim * sizeof(float)); for(size_t i = M; i < numPoints; ++i) { for(size_t j = 0; j < pointDim; ++j) { cpuSum[j] += host_a[i * pointDim + j]; } } numPoints = M; float *device_a = sendToGpu(numPoints * pointDim, host_a); // cudaArraySum is synchronous cudaArraySum( &deviceProp, numPoints, pointDim, device_a ); check(cudaMemcpy(host_sum, device_a, pointDim * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(device_a); for(size_t i = 0; i < pointDim; ++i) { host_sum[i] += cpuSum[i]; } } float gpuMax(size_t N, float* host_a) { assert(host_a != NULL); assert(N > 0); int deviceId; check(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProp; check(cudaGetDeviceProperties(&deviceProp, deviceId)); float *device_a = sendToGpu(N, host_a); cudaArrayMax( &deviceProp, N, device_a ); float gpuMax = 0; check(cudaMemcpy(&gpuMax, device_a, sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(device_a); return gpuMax; } void gpuLogMVNormDist( const size_t numPoints, const size_t pointDim, const float* X, const float* mu, const float* sigmaL, float* logP ) { int deviceId; check(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProp; check(cudaGetDeviceProperties(&deviceProp, deviceId)); float* device_X = sendToGpu(numPoints * pointDim, X); float* device_mu = sendToGpu(pointDim, mu); float* device_sigmaL = sendToGpu(pointDim * pointDim, sigmaL); float* device_logP = mallocOnGpu(numPoints); dim3 grid, block; calcDim(numPoints, &deviceProp, &block, &grid); kernLogMVNormDist<<<grid, block>>>( numPoints, pointDim, device_X, device_mu, device_sigmaL, device_logP ); check(cudaMemcpy(logP, device_logP, numPoints * sizeof(float), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); cudaFree(device_X); cudaFree(device_mu); cudaFree(device_sigmaL); cudaFree(device_logP); } float gpuGmmLogLikelihood( const size_t numPoints, const size_t numComponents, const float* logpi, float* logP ) { int deviceId; check(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProp; check(cudaGetDeviceProperties(&deviceProp, deviceId)); float* device_logpi = sendToGpu(numComponents, logpi); // Sending all data because logP is an array organized by: // [ <- numPoints -> ]_0 [ <- numPoints -> ]_... [ <- numPoints -> ]_{k-1} // So even though we are only using M of those points on the GPU, // we need all numPoints to ensure indexing by numPoints * k + i works // correctly to access prob(x_i|mu_k,Sigma_k). 
float* device_logP = sendToGpu(numComponents * numPoints, logP); float logL = cudaGmmLogLikelihoodAndGammaNK( & deviceProp, numPoints, numComponents, logpi, logP, device_logpi, device_logP ); cudaFree(device_logpi); cudaFree(device_logP); return logL; } void gpuCalcLogGammaNK( const size_t numPoints, const size_t numComponents, const float* logpi, float* loggamma ) { gpuGmmLogLikelihood( numPoints, numComponents, logpi, loggamma ); } void gpuCalcLogGammaK( const size_t numPoints, const size_t numComponents, const float* loggamma, float* logGamma ) { // Gamma[k] = max + log sum expf(loggamma - max) float* working = (float*)malloc(numPoints * sizeof(float)); for(size_t k = 0; k < numComponents; ++k) { // TODO: refactor to have a generic z = a + log sum expf(x - a) memcpy(working, & loggamma[k * numPoints], numPoints * sizeof(float)); float maxValue = gpuMax(numPoints, working); memcpy(working, & loggamma[k * numPoints], numPoints * sizeof(float)); for(size_t i = 0; i < numPoints; ++i) { working[i] = expf(working[i] - maxValue); } float sum = 0; gpuSum(numPoints, 1, working, & sum); logGamma[k] = maxValue + logf(sum ); } free(working); } void gpuGmmFit( const float* X, const size_t numPoints, const size_t pointDim, const size_t numComponents, float* pi, float* Mu, float* Sigma, float* SigmaL, float* normalizers, const size_t maxIterations, GMM* gmm ) { assert(X != NULL); assert(numPoints > 0); assert(pointDim > 0 && pointDim <= 1024); assert(numComponents > 0 && numComponents <= 1024); assert(pi != NULL); assert(Mu != NULL); assert(Sigma != NULL); assert(SigmaL != NULL); assert(normalizers != NULL); assert(maxIterations >= 1); int deviceId; check(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProp; check(cudaGetDeviceProperties(&deviceProp, deviceId)); // printf("name: %s\n", deviceProp.name); // printf("multiProcessorCount: %d\n", deviceProp.multiProcessorCount); // printf("concurrentKernels: %d\n", deviceProp.concurrentKernels); float* device_X = pinHostAndSendDevice(numPoints * pointDim, (float*) X); for(size_t i = 0; i < numComponents; ++i) { assert(pi[i] > 0); pi[i] = logf(pi[i]); } float* device_logpi = pinHostAndSendDevice(numComponents, pi); float* device_Mu = pinHostAndSendDevice(numComponents * pointDim, Mu); float* device_Sigma = pinHostAndSendDevice(numComponents * pointDim * pointDim, Sigma); float* device_SigmaL = pinHostAndSendDevice(numComponents * pointDim * pointDim, SigmaL); float* device_normalizers = pinHostAndSendDevice(numComponents, normalizers); int error = 0; int* device_error = (int*) pinHostAndSendDevice(1, (float*) &error); float* device_loggamma = mallocOnGpu(numPoints * numComponents); float* device_logGamma = mallocOnGpu(numPoints * numComponents); float previousLogL = -INFINITY; float* pinnedCurrentLogL; cudaMallocHost(&pinnedCurrentLogL, sizeof(float)); *pinnedCurrentLogL = -INFINITY; // logPx, mu, sigma reductions // This means for mu and sigma can only do one component at a time otherwise // the memory foot print will limit how much data we can actually work with. 
float* device_working = mallocOnGpu(numComponents * numPoints * pointDim * pointDim); dim3 grid, block; calcDim(numPoints, &deviceProp, &block, &grid); size_t iteration = 0; const float tolerance = 1e-8; cudaStream_t streams[numComponents]; for(size_t k = 0; k < numComponents; ++k) { cudaStreamCreate(&streams[k]); } cudaEvent_t kernelEvent[numComponents + 1]; for(size_t k = 0; k <= numComponents; ++k) { cudaEventCreateWithFlags(& kernelEvent[k], cudaEventDisableTiming); } try { do { // -------------------------------------------------------------------------- // E-Step // -------------------------------------------------------------------------- // loggamma[k * numPoints + i] = p(x_i | mu_k, Sigma_k ) for(size_t k = 0; k < numComponents; ++k) { // Fill in numPoint many probabilities kernLogMVNormDist<<<grid, block, 0, streams[k]>>>( numPoints, pointDim, device_X, & device_Mu[k * pointDim], & device_SigmaL[k * pointDim * pointDim], & device_loggamma[k * numPoints] ); cudaEventRecord(kernelEvent[k], streams[k]); } for(size_t k = 0; k < numComponents; ++k) { // streams[numComponents - 1] needs to wait for everyone else to finish cudaStreamWaitEvent(streams[numComponents-1], kernelEvent[k], 0); } // loggamma[k * numPoints + i] = p(x_i | mu_k, Sigma_k) / p(x_i) // working[i] = p(x_i) kernCalcLogLikelihoodAndGammaNK<<<grid, block, 0, streams[numComponents - 1]>>>( numPoints, numComponents, device_logpi, device_working, device_loggamma ); // working[0] = sum_{i} p(x_i) cudaArraySum(&deviceProp, numPoints, 1, device_working, streams[numComponents - 1]); previousLogL = *pinnedCurrentLogL; check(cudaMemcpyAsync( pinnedCurrentLogL, device_working, sizeof(float), cudaMemcpyDeviceToHost, streams[numComponents - 1] )); for(size_t k = 0; k < numComponents; ++k) { // synchronize everybody with the host cudaStreamSynchronize(streams[k]); } if(fabsf(*pinnedCurrentLogL - previousLogL) < tolerance || *pinnedCurrentLogL < previousLogL) { break; } // -------------------------------------------------------------------------- // M-Step // -------------------------------------------------------------------------- for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; cudaLogSumExp( & deviceProp, grid, block, numPoints, & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[i * pointDim + j] = gamma_ik / Gamma K * x_j kernCalcMu<<<grid, block, 0, streams[k]>>>( numPoints, pointDim, device_X, & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[0 + j] = sum gamma_ik / Gamma K * x_j cudaArraySum( &deviceProp, numPoints, pointDim, device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(cudaMemcpyAsync( & device_Mu[k * pointDim], device_workingK, pointDim * sizeof(float), cudaMemcpyDeviceToDevice, streams[k] )); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(cudaMemcpyAsync( & device_Sigma[k * pointDim * pointDim], device_workingK, pointDim * pointDim * sizeof(float), cudaMemcpyDeviceToDevice, streams[k] )); } 
for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; kernCalcSigma<<<grid, block, 0, streams[k]>>>( numPoints, pointDim, device_X, & device_Mu[k * pointDim], & device_loggamma[k * numPoints], & device_logGamma[k * numPoints], device_workingK ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; // working[0 + j] = sum gamma_ik / Gamma K * [...]_j cudaArraySum( &deviceProp, numPoints, pointDim * pointDim, device_workingK, streams[k] ); } for(size_t k = 0; k < numComponents; ++k) { float* device_workingK = & device_working[k * numPoints * pointDim * pointDim]; check(cudaMemcpyAsync( & device_Sigma[k * pointDim * pointDim], device_workingK, pointDim * pointDim * sizeof(float), cudaMemcpyDeviceToDevice, streams[k] )); cudaEventRecord(kernelEvent[k], streams[k]); } for(size_t k = 0; k < numComponents; ++k) { // streams[numComponents - 1] needs to wait for everyone else to finish cudaStreamWaitEvent(streams[numComponents-1], kernelEvent[k], 0); } // pi_k^(t+1) = pi_k Gamma_k / sum_{i}^{K} pi_i * Gamma_i // Use thread sync to compute denom to avoid data race kernUpdatePi<<<1, numComponents, 0, streams[numComponents - 1]>>>( numPoints, numComponents, device_logpi, device_logGamma ); // recompute sigmaL and normalizer kernPrepareCovariances<<<1, numComponents, 0, streams[numComponents - 1]>>>( numComponents, pointDim, device_Sigma, device_SigmaL, device_normalizers, device_error ); cudaEventRecord(kernelEvent[numComponents], streams[numComponents - 1]); for(size_t k = 0; k < numComponents; ++k) { // Everyone needs to wait for the work on streams[numComponents - 1] to finish. cudaStreamWaitEvent(streams[k], kernelEvent[numComponents], 0); } // check error to see if inverse failed check(cudaMemcpy(&error, device_error, sizeof(int), cudaMemcpyDeviceToHost)); if ( error ) { throw std::runtime_error("Failed to compute inverse"); } } while(++iteration < maxIterations); // copy loggamma to host to compute output labels float* loggamma = (float *)malloc(numPoints * numComponents * sizeof(float)); check(cudaMemcpy( loggamma, device_loggamma, numPoints * numComponents * sizeof(float), cudaMemcpyDeviceToHost )); gmm->failed = false; gmm->y_pred = calcLabels(loggamma, numPoints, numComponents); gmm->logL = *pinnedCurrentLogL; } catch ( std::runtime_error& e ) { fprintf(stderr, "warning: model failed\n"); gmm->failed = true; } for(size_t k = 0; k <= numComponents; ++k) { cudaEventDestroy(kernelEvent[k]); } for(size_t k = 0; k < numComponents; ++k) { cudaStreamDestroy(streams[k]); } cudaFreeHost(pinnedCurrentLogL); cudaFree(device_working); cudaFree(device_logGamma); cudaFree(device_loggamma); unpinHost(device_error, &error); recvDeviceUnpinHost(device_normalizers, normalizers, numComponents); recvDeviceUnpinHost(device_SigmaL, SigmaL, numComponents * pointDim * pointDim); recvDeviceUnpinHost(device_Sigma, Sigma, numComponents * pointDim * pointDim); recvDeviceUnpinHost(device_Mu, Mu, numComponents * pointDim); recvDeviceUnpinHost(device_logpi, pi, numComponents); for(size_t i = 0; i < numComponents; ++i) { pi[i] = expf(pi[i]); } unpinHost(device_X, (float*) X); }
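gpuCalcLogGammaK above assembles Gamma[k] = max + log(sum(expf(loggamma - max))) out of separate gpuMax and gpuSum calls. That is the standard max-shifted log-sum-exp; a compact CPU version of the same computation is sketched below (an illustrative helper, not part of the file) to make the numerical-stability trick explicit.

// Max-shifted log-sum-exp: subtracting the maximum before expf keeps the
// intermediate sum finite even when the log values are large in magnitude.
#include <cmath>
#include <cstddef>

float log_sum_exp(const float *logv, size_t n) {
  float m = logv[0];
  for (size_t i = 1; i < n; ++i) m = fmaxf(m, logv[i]);   // running maximum
  float s = 0.0f;
  for (size_t i = 0; i < n; ++i) s += expf(logv[i] - m);  // shifted exponentials
  return m + logf(s);
}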
ae3aced482e6fce0716262ab0769409b4a223e0f.hip
// !!! This is a file automatically generated by hipify!!! /** * @file CudauUtils.h * @author Liran Nachman ([email protected]) * @brief * @version 0.1 * @date 2019-07-27 * * @copyright Copyright (c) 2019 * */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "cudaUtils.h" #include "kernel.h" hipError_t FreeFunction(double * dev_W, double * dev_alfa, int * dev_mislead) { hipError_t cudaStatus; cudaStatus = hipFree(dev_W); if (cudaStatus != hipSuccess) printf("failed to free cuda - W \n"); cudaStatus = hipFree(dev_mislead); if (cudaStatus != hipSuccess) printf("failed to free cuda - mislead points \n"); cudaStatus = hipFree(dev_alfa); if (cudaStatus != hipSuccess) printf("failed to free cuda - alfa \n"); return cudaStatus; } void MyCudaMalloc(void** dev_pointer, size_t size, int error_label) { hipError_t cudaStatus; // points malloc n dims . cudaStatus = hipMalloc(dev_pointer, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! error_label : %d ", error_label); MyCudaFree(*dev_pointer, error_label); } } void MyCudaCopy(void* dest, void * src, size_t size, hipMemcpyKind kind, int error_label) { hipError_t cudaStatus; cudaStatus = hipMemcpy(dest, src, size, kind); if (cudaStatus != hipSuccess) fprintf(stderr, "hipMemcpy failed! error_label : %d", error_label); } void MyCudaFree(void * object, int error_label) { hipError_t cudaStatus; cudaStatus = hipFree(object); if (cudaStatus != hipSuccess) fprintf(stderr, "hipFree failed! error_label : %d", error_label); } void FreeConstanstCuda(Point * dev_pts, double * dev_values, int * dev_n, int * dev_k) { MyCudaFree(dev_pts, 03); MyCudaFree(dev_values, 04); MyCudaFree(dev_n, 05); MyCudaFree(dev_k, 06); } void mallocConstCuda(Point * pts, int n, int k, Point ** dev_pts, int ** dev_n, int ** dev_k, double ** dev_values) { MyCudaMalloc((void**)&(*dev_pts), sizeof(Point)* n, 1); MyCudaMalloc((void**)&(*dev_values), sizeof(double)* (n*(k + 1)), 2); // value n * (k+1) each point have k+1 dims values MyCudaCopy((*dev_pts), pts, sizeof(Point)*n, hipMemcpyHostToDevice, 4); for (int i = 0; i < n; i++) MyCudaCopy(&(*dev_values)[i*(k + 1)], &pts[i].values[0], sizeof(double)*(k + 1), hipMemcpyHostToDevice, 5); MyCudaMalloc((void**)&(*dev_n), sizeof(int), 265); MyCudaMalloc((void**)&(*dev_k), sizeof(int), 3); MyCudaCopy((*dev_n), &n, sizeof(int), hipMemcpyHostToDevice, 5); MyCudaCopy((*dev_k), &k, sizeof(int), hipMemcpyHostToDevice, 6); } double ProcessAlfa(Point * dev_pts, double* dev_values, double * alfa, int *dev_n, int *dev_k, int limit, double QC, int n, int k, double ** WSaved) { *WSaved = (double*)malloc((k + 1) * sizeof(double)); // W k+1 dims int * mislead = (int*)malloc(n * sizeof(int)); // array of n points , //mislead points will be 1 or -1 ,correct=0 int * dev_mislead = NULL; double * dev_W = NULL; double * dev_alfa = NULL; hipError_t cudaStatus; #pragma region malloc and copy values (W ,mislead , alfa,tempresult) to GPU cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed!
Do you have a CUDA-capable GPU installed?"); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } MyCudaMalloc((void**)&dev_W, sizeof(double)* (k + 1), 7); hipMemset(dev_W, 0, sizeof(double)* (k + 1)); // calloc cuda MyCudaMalloc((void**)&dev_mislead, sizeof(int)* (n), 9); hipMemset(dev_mislead, 0, sizeof(int)* (n)); MyCudaMalloc((void**)&dev_alfa, sizeof(double), 11); MyCudaCopy(dev_alfa, alfa, sizeof(double), hipMemcpyHostToDevice, 12); MyCudaCopy(dev_n, &n, sizeof(int), hipMemcpyHostToDevice, 14); #pragma endregion int threadDims = 1000; int blockDims = (n / threadDims) + 1; int counter_limit = 0; while (counter_limit < limit) { // get all mislead points getMisLeadArrayFromPoints << <blockDims, threadDims >> > (dev_pts, dev_values, dev_W, dev_mislead, dev_k, dev_n); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "loopOverPoints2 launch failed: %s\n", hipGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching loopOverPoints! : %s \n", cudaStatus, hipGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } // copy mislead points from gpu after calc FX for all points MyCudaCopy(mislead, dev_mislead, (n) * sizeof(int), hipMemcpyDeviceToHost, 15); int indexerMiss = 0; int result; // we get a array of mislead values (0 == ok , else 1||-1 are mislead by sign )); for (indexerMiss = 0; indexerMiss < n; indexerMiss++) { result = mislead[indexerMiss]; if (result == -1 || result == 1) // found point that mislead break; } if (indexerMiss == n)// all point in good places break; else { // need to create a new W *alfa = *alfa*mislead[indexerMiss]; // alfa * sign MyCudaCopy(dev_alfa, alfa, sizeof(double), hipMemcpyHostToDevice, 77); *alfa = fabs(*alfa); // back to postive alfa because we modified him on gpu. int indexValues = indexerMiss * (k + 1); // n* (K+1) int * dev_index_values = NULL; //index_values , the pointer for coordinates of point ,note values array size n*(k+1) MyCudaMalloc((void**)&dev_index_values, sizeof(int), 88); MyCudaCopy(dev_index_values, &indexValues, sizeof(int), hipMemcpyHostToDevice, 99); //create a new weight createNewWeight << <1, k + 1 >> > (dev_alfa, dev_values, dev_index_values, dev_W); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "createNewWeight launch failed: %s\n", hipGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching createNewWeight! : %s \n", cudaStatus, hipGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } } counter_limit++; } //// need to calcate the q int sumOFmisLead = 0; for (int i = 0; i < n; i++) { if (mislead[i] != 0) sumOFmisLead += 1; } double q = sumOFmisLead / (n*(1.0)); MyCudaCopy(*WSaved, dev_W, sizeof(double)*(k + 1), hipMemcpyDeviceToHost, 70); // copy W // clear resources on cuda GPU FreeFunction(dev_W, dev_alfa, dev_mislead); if (q <= QC) return q; else return 2.0; // q that never will get and larger from all q possiblies . }
ae3aced482e6fce0716262ab0769409b4a223e0f.cu
/** * @file CudauUtils.h * @author Liran Nachman ([email protected]) * @brief * @version 0.1 * @date 2019-07-27 * * @copyright Copyright (c) 2019 * */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cudaUtils.h" #include "kernel.h" cudaError_t FreeFunction(double * dev_W, double * dev_alfa, int * dev_mislead) { cudaError_t cudaStatus; cudaStatus = cudaFree(dev_W); if (cudaStatus != cudaSuccess) printf("failed to free cuda - W \n"); cudaStatus = cudaFree(dev_mislead); if (cudaStatus != cudaSuccess) printf("failed to free cuda - mislead points \n"); cudaStatus = cudaFree(dev_alfa); if (cudaStatus != cudaSuccess) printf("failed to free cuda - alfa \n"); return cudaStatus; } void MyCudaMalloc(void** dev_pointer, size_t size, int error_label) { cudaError_t cudaStatus; // points malloc n dims . cudaStatus = cudaMalloc(dev_pointer, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! error_label : %d ", error_label); MyCudaFree(*dev_pointer, error_label); } } void MyCudaCopy(void* dest, void * src, size_t size, cudaMemcpyKind kind, int error_label) { cudaError_t cudaStatus; cudaStatus = cudaMemcpy(dest, src, size, kind); if (cudaStatus != cudaSuccess) fprintf(stderr, "cudaMemcpy failed! error_label : %d", error_label); } void MyCudaFree(void * object, int error_label) { cudaError_t cudaStatus; cudaStatus = cudaFree(object); if (cudaStatus != cudaSuccess) fprintf(stderr, "cudaFree failed! error_label : %d", error_label); } void FreeConstanstCuda(Point * dev_pts, double * dev_values, int * dev_n, int * dev_k) { MyCudaFree(dev_pts, 03); MyCudaFree(dev_values, 04); MyCudaFree(dev_n, 05); MyCudaFree(dev_k, 06); } void mallocConstCuda(Point * pts, int n, int k, Point ** dev_pts, int ** dev_n, int ** dev_k, double ** dev_values) { MyCudaMalloc((void**)&(*dev_pts), sizeof(Point)* n, 1); MyCudaMalloc((void**)&(*dev_values), sizeof(double)* (n*(k + 1)), 2); // value n * (k+1) each point have k+1 dims values MyCudaCopy((*dev_pts), pts, sizeof(Point)*n, cudaMemcpyHostToDevice, 4); for (int i = 0; i < n; i++) MyCudaCopy(&(*dev_values)[i*(k + 1)], &pts[i].values[0], sizeof(double)*(k + 1), cudaMemcpyHostToDevice, 5); MyCudaMalloc((void**)&(*dev_n), sizeof(int), 265); MyCudaMalloc((void**)&(*dev_k), sizeof(int), 3); MyCudaCopy((*dev_n), &n, sizeof(int), cudaMemcpyHostToDevice, 5); MyCudaCopy((*dev_k), &k, sizeof(int), cudaMemcpyHostToDevice, 6); } double ProcessAlfa(Point * dev_pts, double* dev_values, double * alfa, int *dev_n, int *dev_k, int limit, double QC, int n, int k, double ** WSaved) { *WSaved = (double*)malloc((k + 1) * sizeof(double)); // W k+1 dims int * mislead = (int*)malloc(n * sizeof(int)); // array of n points , //mislead points will be 1 or -1 ,correct=0 int * dev_mislead = NULL; double * dev_W = NULL; double * dev_alfa = NULL; cudaError_t cudaStatus; #pragma region malloc and copy values (W ,mislead , alfa,tempresult) to GPU cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed!
Do you have a CUDA-capable GPU installed?"); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } MyCudaMalloc((void**)&dev_W, sizeof(double)* (k + 1), 7); cudaMemset(dev_W, 0, sizeof(double)* (k + 1)); // calloc cuda MyCudaMalloc((void**)&dev_mislead, sizeof(int)* (n), 9); cudaMemset(dev_mislead, 0, sizeof(int)* (n)); MyCudaMalloc((void**)&dev_alfa, sizeof(double), 11); MyCudaCopy(dev_alfa, alfa, sizeof(double), cudaMemcpyHostToDevice, 12); MyCudaCopy(dev_n, &n, sizeof(int), cudaMemcpyHostToDevice, 14); #pragma endregion int threadDims = 1000; int blockDims = (n / threadDims) + 1; int counter_limit = 0; while (counter_limit < limit) { // get all mislead points getMisLeadArrayFromPoints << <blockDims, threadDims >> > (dev_pts, dev_values, dev_W, dev_mislead, dev_k, dev_n); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "loopOverPoints2 launch failed: %s\n", cudaGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching loopOverPoints! : %s \n", cudaStatus, cudaGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } // copy mislead points from gpu after calc FX for all points MyCudaCopy(mislead, dev_mislead, (n) * sizeof(int), cudaMemcpyDeviceToHost, 15); int indexerMiss = 0; int result; // we get a array of mislead values (0 == ok , else 1||-1 are mislead by sign )); for (indexerMiss = 0; indexerMiss < n; indexerMiss++) { result = mislead[indexerMiss]; if (result == -1 || result == 1) // found point that mislead break; } if (indexerMiss == n)// all point in good places break; else { // need to create a new W *alfa = *alfa*mislead[indexerMiss]; // alfa * sign MyCudaCopy(dev_alfa, alfa, sizeof(double), cudaMemcpyHostToDevice, 77); *alfa = fabs(*alfa); // back to postive alfa because we modified him on gpu. int indexValues = indexerMiss * (k + 1); // n* (K+1) int * dev_index_values = NULL; //index_values , the pointer for coordinates of point ,note values array size n*(k+1) MyCudaMalloc((void**)&dev_index_values, sizeof(int), 88); MyCudaCopy(dev_index_values, &indexValues, sizeof(int), cudaMemcpyHostToDevice, 99); //create a new weight createNewWeight << <1, k + 1 >> > (dev_alfa, dev_values, dev_index_values, dev_W); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "createNewWeight launch failed: %s\n", cudaGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching createNewWeight! : %s \n", cudaStatus, cudaGetErrorString(cudaStatus)); FreeFunction(dev_W, dev_alfa, dev_mislead); return -2; } } counter_limit++; } //// need to calcate the q int sumOFmisLead = 0; for (int i = 0; i < n; i++) { if (mislead[i] != 0) sumOFmisLead += 1; } double q = sumOFmisLead / (n*(1.0)); MyCudaCopy(*WSaved, dev_W, sizeof(double)*(k + 1), cudaMemcpyDeviceToHost, 70); // copy W // clear resources on cuda GPU FreeFunction(dev_W, dev_alfa, dev_mislead); if (q <= QC) return q; else return 2.0; // q that never will get and larger from all q possiblies . }
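ProcessAlfa in both copies above drives a perceptron-style loop: evaluate every point on the device, take the first misclassified one, scale alfa by its sign, let createNewWeight move W by that signed step, and stop after limit rounds or when nothing is misclassified; q is then the misclassified fraction. The kernels that compute the sign are not in this file, so the CPU sketch below uses the conventional perceptron rule as a stand-in; the function and the use of the label as the sign are assumptions, not a claim about the author's kernels.

// Illustrative CPU analogue of the loop ProcessAlfa runs on the GPU, under the
// usual perceptron convention: each x has k+1 values (bias included), a point
// is misclassified when sign(W.x) != label, and W moves by alfa * label * x.
#include <cstddef>
#include <vector>

int perceptron_pass(std::vector<double> &W,
                    const std::vector<std::vector<double>> &X,
                    const std::vector<int> &label, double alfa, int limit) {
  const size_t dims = W.size();                       // k + 1 weights
  for (int iter = 0; iter < limit; ++iter) {
    int miss = -1;
    for (size_t i = 0; i < X.size() && miss < 0; ++i) {
      double f = 0.0;
      for (size_t j = 0; j < dims; ++j) f += W[j] * X[i][j];
      int sign = (f >= 0.0) ? 1 : -1;
      if (sign != label[i]) miss = static_cast<int>(i);  // first mislead point
    }
    if (miss < 0) return iter;                        // everything classified
    for (size_t j = 0; j < dims; ++j) W[j] += alfa * label[miss] * X[miss][j];
  }
  return limit;
}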
634e78d110ecfa5d369411279c0aa8f457310cf2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "parallelMeanUnroll2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_inputArray = NULL; hipMalloc(&d_inputArray, XSIZE*YSIZE); uint64_t inputLength = 1; float *d_outputMean = NULL; hipMalloc(&d_outputMean, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( parallelMeanUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, d_inputArray,inputLength,d_outputMean); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( parallelMeanUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, d_inputArray,inputLength,d_outputMean); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( parallelMeanUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, d_inputArray,inputLength,d_outputMean); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
634e78d110ecfa5d369411279c0aa8f457310cf2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "parallelMeanUnroll2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_inputArray = NULL; cudaMalloc(&d_inputArray, XSIZE*YSIZE); uint64_t inputLength = 1; float *d_outputMean = NULL; cudaMalloc(&d_outputMean, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); parallelMeanUnroll2<<<gridBlock,threadBlock>>>(d_inputArray,inputLength,d_outputMean); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { parallelMeanUnroll2<<<gridBlock,threadBlock>>>(d_inputArray,inputLength,d_outputMean); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { parallelMeanUnroll2<<<gridBlock,threadBlock>>>(d_inputArray,inputLength,d_outputMean); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
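The benchmark harness above reads steady_clock immediately after enqueueing its 1000 launches, with no synchronization before the end timestamp, so the measured interval largely reflects launch/enqueue overhead rather than kernel execution time. A hedged sketch of event-based timing that the same loop could use is below; the kernel and its arguments are stand-ins, not the harness's parallelMeanUnroll2:

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

// Illustrative kernel standing in for the benchmarked one.
__global__ void dummyKernel(float* data, uint64_t n) {
    uint64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1.0f;
}

// Times `iters` launches with CUDA events. The events are recorded on the
// same stream as the kernels, and cudaEventSynchronize waits for the last
// launch to finish, so the elapsed time covers actual execution.
float timeKernelMs(float* d_data, uint64_t n, int iters) {
    dim3 block(256);
    dim3 grid((unsigned)((n + block.x - 1) / block.x));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        dummyKernel<<<grid, block>>>(d_data, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all launches have completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}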
2fc836b2c7c7a292a38874ead4712779338f489f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_stencil37_hack1_cp_cols.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); double *shared_rows = NULL; hipMalloc(&shared_rows, XSIZE*YSIZE); double *shared_cols = NULL; hipMalloc(&shared_cols, XSIZE*YSIZE); double *shared_slices = NULL; hipMalloc(&shared_slices, XSIZE*YSIZE); int n_rows = 1; int n_cols = 1; int n_slices = 1; int tile_x = 1; int tile_y = 1; int tile_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_cols), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_cols), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_cols), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2fc836b2c7c7a292a38874ead4712779338f489f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_stencil37_hack1_cp_cols.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); double *shared_rows = NULL; cudaMalloc(&shared_rows, XSIZE*YSIZE); double *shared_cols = NULL; cudaMalloc(&shared_cols, XSIZE*YSIZE); double *shared_slices = NULL; cudaMalloc(&shared_slices, XSIZE*YSIZE); int n_rows = 1; int n_cols = 1; int n_slices = 1; int tile_x = 1; int tile_y = 1; int tile_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_stencil37_hack1_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_stencil37_hack1_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_stencil37_hack1_cp_cols<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
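Both auto-generated harnesses size their device buffers as XSIZE*YSIZE bytes even though the pointers are float* or double*, which under-allocates if a kernel ever indexes the full XSIZE-by-YSIZE range of elements. A safer allocation pattern, sketched here with an illustrative helper name rather than the harness's own code, scales the element count by the element size:

#include <cuda_runtime.h>

// Allocate an xsize x ysize matrix of doubles on the device.
// cudaMalloc takes a size in bytes, so the element count is multiplied
// by sizeof(double); the harnesses above omit that factor.
static double* allocMatrix(int xsize, int ysize) {
    double* d_ptr = nullptr;
    size_t bytes = static_cast<size_t>(xsize) * ysize * sizeof(double);
    if (cudaMalloc(&d_ptr, bytes) != cudaSuccess)
        return nullptr;
    cudaMemset(d_ptr, 0, bytes);  // start from a defined state
    return d_ptr;
}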
a79ff7b80ea0cc103dab9f65e606b6e4bab10995.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layer.h" #include <iostream> #include <fstream> #include <string> #include <exception> // Constructor Layer::Layer(int M, int N, int O) { this->M = M; this->N = N; this->O = O; float h_bias[N]; float h_weight[N][M]; output = NULL; preact = NULL; bias = NULL; weight = NULL; for (int i = 0; i < N; ++i) { h_bias[i] = 0.5f - float(rand()) / float(RAND_MAX); /*h_bias[i] = 0.0f;*/ for (int j = 0; j < M; ++j) { h_weight[i][j] = 0.5f - float(rand()) / float(RAND_MAX); /*h_weight[i][j] = 0.05f;*/ } } hipMalloc(&output, sizeof(float) * O); hipMalloc(&preact, sizeof(float) * O); hipMalloc(&bias, sizeof(float) * N); hipMalloc(&weight, sizeof(float) * M * N); hipMalloc(&d_output, sizeof(float) * O); hipMalloc(&d_preact, sizeof(float) * O); hipMalloc(&d_weight, sizeof(float) * M * N); hipMemcpy(bias, h_bias, sizeof(float) * N, hipMemcpyHostToDevice); hipMemcpy(weight, h_weight, sizeof(float) * M * N, hipMemcpyHostToDevice); } // Load from a previously saved layer. Path should point to a file as saved by // Layer::export(). If enableTrain is true, memory will be allocated for // training purposes, otherwise the layer is loaded for inference only and can // only be used for forward_pass(). Layer::Layer(std::string path, bool enableTrain) { std::ifstream inF(path, std::ios::binary); if(inF.fail()) { throw std::runtime_error("Failed to open layer file\n"); } inF.read((char*)&M, sizeof(int)); inF.read((char*)&N, sizeof(int)); inF.read((char*)&O, sizeof(int)); if(inF.fail()) { throw std::runtime_error("Failed to write layer file\n"); } float h_bias[N]; float h_weight[N][M]; inF.read((char*)h_bias, sizeof(float) * N); inF.read((char*)h_weight, sizeof(float) * M * N); inF.close(); hipMalloc(&output, sizeof(float) * O); hipMalloc(&preact, sizeof(float) * O); hipMalloc(&bias, sizeof(float) * N); hipMalloc(&weight, sizeof(float) * M * N); hipMemcpy(bias, h_bias, sizeof(float) * N, hipMemcpyHostToDevice); hipMemcpy(weight, h_weight, sizeof(float) * M * N, hipMemcpyHostToDevice); if(enableTrain) { hipMalloc(&d_output, sizeof(float) * O); hipMalloc(&d_preact, sizeof(float) * O); hipMalloc(&d_weight, sizeof(float) * M * N); } } // Destructor Layer::~Layer() { hipFree(output); hipFree(preact); hipFree(bias); hipFree(weight); hipFree(d_output); hipFree(d_preact); hipFree(d_weight); } // Save the layer to a file bool Layer::save(std::string path) { std::ofstream outF(path, std::ios::binary | std::ios::trunc); if(outF.fail()) { printf("Failed to write layer to %s\n", path.c_str()); return false; } float h_bias[N]; float h_weight[N][M]; hipMemcpy(h_bias, bias, sizeof(float) * N, hipMemcpyDeviceToHost); hipMemcpy(h_weight, weight, sizeof(float) * M * N, hipMemcpyDeviceToHost); outF.write((char*)&M, sizeof(int)); outF.write((char*)&N, sizeof(int)); outF.write((char*)&O, sizeof(int)); outF.write((char*)h_bias, sizeof(float) * N); outF.write((char*)h_weight, sizeof(float) * M * N); outF.close(); if(outF.fail()) { printf("Failed to write layer to %s\n", path.c_str()); return false; } return true; } // Send data one row from dataset to the GPU void Layer::setOutput(float *data) { hipMemcpy(output, data, sizeof(float) * O, hipMemcpyHostToDevice); } // Reset GPU memory between iterations void Layer::clear() { hipMemset(output, 0x00, sizeof(float) * O); hipMemset(preact, 0x00, sizeof(float) * O); } void Layer::bp_clear() { hipMemset(d_output, 0x00, sizeof(float) * O); hipMemset(d_preact, 0x00, sizeof(float) * O); hipMemset(d_weight, 
0x00, sizeof(float) * M * N); } __device__ float step_function(float v) { return 1 / (1 + exp(-v)); } __global__ void apply_step_function(float *input, float *output, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { output[idx] = step_function(input[idx]); } } __global__ void makeError(float *err, float *output, unsigned int Y, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]); } } __global__ void apply_grad(float *output, float *grad, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { output[idx] += dt * grad[idx]; } } __global__ void fp_preact_c1(float input[28][28], float preact[6][24][24], float weight[6][5][5]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 5*5*6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 5); const int i2 = ((idx /= 5 ) % 5); const int i3 = ((idx /= 5 ) % 6); const int i4 = ((idx /= 6 ) % 24); const int i5 = ((idx /= 24 ) % 24); atomicAdd(&preact[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]); } } __global__ void fp_bias_c1(float preact[6][24][24], float bias[6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); preact[i1][i2][i3] += bias[i1]; } } __global__ void fp_preact_s1(float input[6][24][24], float preact[6][6][6], float weight[1][4][4]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 4*4*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 4); const int i2 = ((idx /= 4 ) % 4); const int i3 = ((idx /= 4 ) % 6); const int i4 = ((idx /= 6 ) % 6); const int i5 = ((idx /= 6 ) % 6); atomicAdd(&preact[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]); } } __global__ void fp_bias_s1(float preact[6][6][6], float bias[1]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); preact[i1][i2][i3] += bias[0]; } } __global__ void fp_preact_f(float input[6][6][6], float preact[10], float weight[10][6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); atomicAdd(&preact[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]); } } __global__ void fp_bias_f(float preact[10], float bias[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10; for (int idx = 
N * pos / size; idx < N * (pos+1) / size; ++idx) { preact[idx] += bias[idx]; } } __global__ void bp_weight_f(float d_weight[10][6][6][6], float d_preact[10], float p_output[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); d_weight[i1][i2][i3][i4] = d_preact[i1] * p_output[i2][i3][i4]; } } __global__ void bp_bias_f(float bias[10], float d_preact[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { bias[idx] += dt * d_preact[idx]; } } __global__ void bp_output_s1(float d_output[6][6][6], float n_weight[10][6][6][6], float nd_preact[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); atomicAdd(&d_output[i2][i3][i4], n_weight[i1][i2][i3][i4] * nd_preact[i1]); } } __global__ void bp_preact_s1(float d_preact[6][6][6], float d_output[6][6][6], float preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); const float o = step_function(preact[i1][i2][i3]); d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o); } } __global__ void bp_weight_s1(float d_weight[1][4][4], float d_preact[6][6][6], float p_output[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 1*4*4*6*6*6; const float d = pow(6.0f, 3.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 1); const int i2 = ((idx /= 1 ) % 4); const int i3 = ((idx /= 4 ) % 4); const int i4 = ((idx /= 4 ) % 6); const int i5 = ((idx /= 6 ) % 6); const int i6 = ((idx /= 6 ) % 6); atomicAdd(&d_weight[i1][i2][i3], d_preact[i4][i5][i6] * p_output[i4][i5 * 4 + i2][i6 * 4 + i3]); } } __global__ void bp_bias_s1(float bias[1], float d_preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; const float d = pow(6.0f, 3.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); atomicAdd(&bias[0], dt * d_preact[i1][i2][i3] / d); } } __global__ void bp_output_c1(float d_output[6][24][24], float n_weight[1][4][4], float nd_preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 1*4*4*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 1); const int i2 = ((idx /= 1 ) % 4); const int i3 = ((idx /= 4 ) % 4); const int i4 = ((idx /= 4 ) % 6); const int i5 = ((idx /= 6 ) % 6); const int i6 = ((idx /= 6 ) % 6); atomicAdd(&d_output[i4][i5 * 4 + i2][i6 * 4 + i3], 
n_weight[i1][i2][i3] * nd_preact[i4][i5][i6]); } } __global__ void bp_preact_c1(float d_preact[6][24][24], float d_output[6][24][24], float preact[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); const float o = step_function(preact[i1][i2][i3]); d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o); } } __global__ void bp_weight_c1(float d_weight[6][5][5], float d_preact[6][24][24], float p_output[28][28]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*5*5*24*24; const float d = pow(24.0f, 2.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 5); const int i3 = ((idx /= 5 ) % 5); const int i4 = ((idx /= 5 ) % 24); const int i5 = ((idx /= 24 ) % 24); atomicAdd(&d_weight[i1][i2][i3], d_preact[i1][i4][i5] * p_output[i4 + i2][i5 + i3] / d); } } __global__ void bp_bias_c1(float bias[6], float d_preact[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; const float d = pow(24.0f, 2.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); atomicAdd(&bias[i1], dt * d_preact[i1][i2][i3] / d); } }
a79ff7b80ea0cc103dab9f65e606b6e4bab10995.cu
#include "layer.h" #include <iostream> #include <fstream> #include <string> #include <exception> // Constructor Layer::Layer(int M, int N, int O) { this->M = M; this->N = N; this->O = O; float h_bias[N]; float h_weight[N][M]; output = NULL; preact = NULL; bias = NULL; weight = NULL; for (int i = 0; i < N; ++i) { h_bias[i] = 0.5f - float(rand()) / float(RAND_MAX); /*h_bias[i] = 0.0f;*/ for (int j = 0; j < M; ++j) { h_weight[i][j] = 0.5f - float(rand()) / float(RAND_MAX); /*h_weight[i][j] = 0.05f;*/ } } cudaMalloc(&output, sizeof(float) * O); cudaMalloc(&preact, sizeof(float) * O); cudaMalloc(&bias, sizeof(float) * N); cudaMalloc(&weight, sizeof(float) * M * N); cudaMalloc(&d_output, sizeof(float) * O); cudaMalloc(&d_preact, sizeof(float) * O); cudaMalloc(&d_weight, sizeof(float) * M * N); cudaMemcpy(bias, h_bias, sizeof(float) * N, cudaMemcpyHostToDevice); cudaMemcpy(weight, h_weight, sizeof(float) * M * N, cudaMemcpyHostToDevice); } // Load from a previously saved layer. Path should point to a file as saved by // Layer::export(). If enableTrain is true, memory will be allocated for // training purposes, otherwise the layer is loaded for inference only and can // only be used for forward_pass(). Layer::Layer(std::string path, bool enableTrain) { std::ifstream inF(path, std::ios::binary); if(inF.fail()) { throw std::runtime_error("Failed to open layer file\n"); } inF.read((char*)&M, sizeof(int)); inF.read((char*)&N, sizeof(int)); inF.read((char*)&O, sizeof(int)); if(inF.fail()) { throw std::runtime_error("Failed to write layer file\n"); } float h_bias[N]; float h_weight[N][M]; inF.read((char*)h_bias, sizeof(float) * N); inF.read((char*)h_weight, sizeof(float) * M * N); inF.close(); cudaMalloc(&output, sizeof(float) * O); cudaMalloc(&preact, sizeof(float) * O); cudaMalloc(&bias, sizeof(float) * N); cudaMalloc(&weight, sizeof(float) * M * N); cudaMemcpy(bias, h_bias, sizeof(float) * N, cudaMemcpyHostToDevice); cudaMemcpy(weight, h_weight, sizeof(float) * M * N, cudaMemcpyHostToDevice); if(enableTrain) { cudaMalloc(&d_output, sizeof(float) * O); cudaMalloc(&d_preact, sizeof(float) * O); cudaMalloc(&d_weight, sizeof(float) * M * N); } } // Destructor Layer::~Layer() { cudaFree(output); cudaFree(preact); cudaFree(bias); cudaFree(weight); cudaFree(d_output); cudaFree(d_preact); cudaFree(d_weight); } // Save the layer to a file bool Layer::save(std::string path) { std::ofstream outF(path, std::ios::binary | std::ios::trunc); if(outF.fail()) { printf("Failed to write layer to %s\n", path.c_str()); return false; } float h_bias[N]; float h_weight[N][M]; cudaMemcpy(h_bias, bias, sizeof(float) * N, cudaMemcpyDeviceToHost); cudaMemcpy(h_weight, weight, sizeof(float) * M * N, cudaMemcpyDeviceToHost); outF.write((char*)&M, sizeof(int)); outF.write((char*)&N, sizeof(int)); outF.write((char*)&O, sizeof(int)); outF.write((char*)h_bias, sizeof(float) * N); outF.write((char*)h_weight, sizeof(float) * M * N); outF.close(); if(outF.fail()) { printf("Failed to write layer to %s\n", path.c_str()); return false; } return true; } // Send data one row from dataset to the GPU void Layer::setOutput(float *data) { cudaMemcpy(output, data, sizeof(float) * O, cudaMemcpyHostToDevice); } // Reset GPU memory between iterations void Layer::clear() { cudaMemset(output, 0x00, sizeof(float) * O); cudaMemset(preact, 0x00, sizeof(float) * O); } void Layer::bp_clear() { cudaMemset(d_output, 0x00, sizeof(float) * O); cudaMemset(d_preact, 0x00, sizeof(float) * O); cudaMemset(d_weight, 0x00, sizeof(float) * M * N); } __device__ float 
step_function(float v) { return 1 / (1 + exp(-v)); } __global__ void apply_step_function(float *input, float *output, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { output[idx] = step_function(input[idx]); } } __global__ void makeError(float *err, float *output, unsigned int Y, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]); } } __global__ void apply_grad(float *output, float *grad, const int N) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { output[idx] += dt * grad[idx]; } } __global__ void fp_preact_c1(float input[28][28], float preact[6][24][24], float weight[6][5][5]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 5*5*6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 5); const int i2 = ((idx /= 5 ) % 5); const int i3 = ((idx /= 5 ) % 6); const int i4 = ((idx /= 6 ) % 24); const int i5 = ((idx /= 24 ) % 24); atomicAdd(&preact[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]); } } __global__ void fp_bias_c1(float preact[6][24][24], float bias[6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); preact[i1][i2][i3] += bias[i1]; } } __global__ void fp_preact_s1(float input[6][24][24], float preact[6][6][6], float weight[1][4][4]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 4*4*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 4); const int i2 = ((idx /= 4 ) % 4); const int i3 = ((idx /= 4 ) % 6); const int i4 = ((idx /= 6 ) % 6); const int i5 = ((idx /= 6 ) % 6); atomicAdd(&preact[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]); } } __global__ void fp_bias_s1(float preact[6][6][6], float bias[1]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); preact[i1][i2][i3] += bias[0]; } } __global__ void fp_preact_f(float input[6][6][6], float preact[10], float weight[10][6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); atomicAdd(&preact[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]); } } __global__ void fp_bias_f(float preact[10], float bias[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) 
{ preact[idx] += bias[idx]; } } __global__ void bp_weight_f(float d_weight[10][6][6][6], float d_preact[10], float p_output[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); d_weight[i1][i2][i3][i4] = d_preact[i1] * p_output[i2][i3][i4]; } } __global__ void bp_bias_f(float bias[10], float d_preact[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10; for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) { bias[idx] += dt * d_preact[idx]; } } __global__ void bp_output_s1(float d_output[6][6][6], float n_weight[10][6][6][6], float nd_preact[10]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 10*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 10); const int i2 = ((idx /= 10 ) % 6); const int i3 = ((idx /= 6 ) % 6); const int i4 = ((idx /= 6 ) % 6); atomicAdd(&d_output[i2][i3][i4], n_weight[i1][i2][i3][i4] * nd_preact[i1]); } } __global__ void bp_preact_s1(float d_preact[6][6][6], float d_output[6][6][6], float preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); const float o = step_function(preact[i1][i2][i3]); d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o); } } __global__ void bp_weight_s1(float d_weight[1][4][4], float d_preact[6][6][6], float p_output[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 1*4*4*6*6*6; const float d = pow(6.0f, 3.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 1); const int i2 = ((idx /= 1 ) % 4); const int i3 = ((idx /= 4 ) % 4); const int i4 = ((idx /= 4 ) % 6); const int i5 = ((idx /= 6 ) % 6); const int i6 = ((idx /= 6 ) % 6); atomicAdd(&d_weight[i1][i2][i3], d_preact[i4][i5][i6] * p_output[i4][i5 * 4 + i2][i6 * 4 + i3]); } } __global__ void bp_bias_s1(float bias[1], float d_preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*6*6; const float d = pow(6.0f, 3.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 6); const int i3 = ((idx /= 6 ) % 6); atomicAdd(&bias[0], dt * d_preact[i1][i2][i3] / d); } } __global__ void bp_output_c1(float d_output[6][24][24], float n_weight[1][4][4], float nd_preact[6][6][6]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 1*4*4*6*6*6; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 1); const int i2 = ((idx /= 1 ) % 4); const int i3 = ((idx /= 4 ) % 4); const int i4 = ((idx /= 4 ) % 6); const int i5 = ((idx /= 6 ) % 6); const int i6 = ((idx /= 6 ) % 6); atomicAdd(&d_output[i4][i5 * 4 + i2][i6 * 4 + i3], n_weight[i1][i2][i3] * nd_preact[i4][i5][i6]); } } 
__global__ void bp_preact_c1(float d_preact[6][24][24], float d_output[6][24][24], float preact[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); const float o = step_function(preact[i1][i2][i3]); d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o); } } __global__ void bp_weight_c1(float d_weight[6][5][5], float d_preact[6][24][24], float p_output[28][28]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*5*5*24*24; const float d = pow(24.0f, 2.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 5); const int i3 = ((idx /= 5 ) % 5); const int i4 = ((idx /= 5 ) % 24); const int i5 = ((idx /= 24 ) % 24); atomicAdd(&d_weight[i1][i2][i3], d_preact[i1][i4][i5] * p_output[i4 + i2][i5 + i3] / d); } } __global__ void bp_bias_c1(float bias[6], float d_preact[6][24][24]) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int size = blockDim.x * gridDim.x; const int N = 6*24*24; const float d = pow(24.0f, 2.0f); for (int n = N * pos / size; n < N * (pos+1) / size; ++n) { int idx = n; const int i1 = ((idx /= 1 ) % 6); const int i2 = ((idx /= 6 ) % 24); const int i3 = ((idx /= 24 ) % 24); atomicAdd(&bias[i1], dt * d_preact[i1][i2][i3] / d); } }
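The kernels in the layer file above divide the flat index range [0, N) into one contiguous chunk per thread via N*pos/size .. N*(pos+1)/size. An equivalent and more common formulation is the grid-stride loop, sketched here for the apply_grad update; this is an alternative illustration, not the file's code, and the learning rate dt is passed explicitly instead of being the global constant the original uses:

// Grid-stride variant of apply_grad: each thread starts at its global id and
// strides by the total thread count, which keeps neighbouring threads on
// neighbouring elements and so produces coalesced memory accesses.
__global__ void apply_grad_grid_stride(float* output, const float* grad,
                                       float dt, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        output[idx] += dt * grad[idx];
    }
}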
18a64874b1758a29f79b354494bf353c01e4363f.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_convert_indices_from_coo_to_csr_native.h> #include <ATen/ops/_convert_indices_from_csr_to_coo_native.h> #include <ATen/ops/_sparse_csr_tensor_unsafe_native.h> #include <ATen/ops/_unique.h> #include <ATen/ops/add_native.h> #include <ATen/ops/resize_as_sparse_native.h> #include <ATen/ops/tensor.h> #include <ATen/ops/zeros.h> #endif #include <hip/hip_runtime.h> #include <type_traits> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/ThrustAllocator.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/sparse/hip/SparseBlasImpl.h> #include <ATen/native/sparse/hip/SparseHIPBlas.h> #include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... 
int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( convert_indices_from_coo_to_csr_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, data_out, data_in, size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename input_t, typename output_t> __global__ void convert_indices_from_csr_to_coo_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t nrows) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { for (int64_t i = data_in[tid]; i < data_in[tid + 1]; i++) data_out[i] = static_cast<output_t>(tid); } } template <typename input_t, typename output_t> void convert_indices_from_csr_to_coo_cuda(const Tensor& indices, const Tensor& crow_indices, const Tensor& col_indices, const bool transpose=false) { int64_t nrows = crow_indices.numel() - 1; if (nrows == 0) { indices.zero_(); return; } auto crow_indices_ = crow_indices.expect_contiguous(); const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>(); TORCH_INTERNAL_ASSERT(indices.is_contiguous()); auto row0 = indices.select(0, transpose?1:0); auto row1 = indices.select(0, transpose?0:1); output_t* data_out = row0.data_ptr<output_t>(); // Run nrows threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); row1.copy_(*col_indices.expect_contiguous()); hipLaunchKernelGGL(( convert_indices_from_csr_to_coo_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, data_out, crow_indices_data_in, nrows); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. 
using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } if (src._nnz() == 0) { return output; } auto valuesBuffer = src_values.to(commonDtype).view({-1, src_values.size(-1)}); resultBuffer = resultBuffer.view({-1, output.size(-2), output.size(-1)}); auto src_crow_indices = src.crow_indices().view({-1, src.crow_indices().size(-1)}); auto src_col_indices = src.col_indices().view({-1, src.col_indices().size(-1)}); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( kComplexHalf, kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { auto batch_count = resultBuffer.dim() > 2 ? resultBuffer.size(-3) : 1; scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); auto out_strides0 = out_strides[0]; auto out_strides1 = out_strides[1]; auto crow_stride0 = src_crow_indices.stride(0); auto col_stride0 = src_col_indices.stride(0); auto val_stride0 = valuesBuffer.stride(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::ThrustAllocator allocator; auto policy = thrust::hip::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(-1) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, cast_value, out_strides0, out_strides1, crow_stride0, col_stride0, val_stride0, batch_count ]__device__(int64_t irow) { for (index_t batch_idx = 0; batch_idx < batch_count; batch_idx++) { index_t start_index = crow_indices_accessor[batch_idx*crow_stride0 + irow]; index_t end_index = crow_indices_accessor[batch_idx*crow_stride0 + irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[batch_idx*col_stride0 + i]; auto index = batch_idx * out_strides0 + irow * out_strides1 + icol; out_ptr[index] += cast_value * values_accessor[batch_idx*val_stride0 + i]; } } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( self.sizes().equals(other.sizes()), "torch.add: Expected input tensors to have the same shape, but got tensor `self` with shape ", self.sizes(), " and tensor `other` with shape ", other.sizes()); at::native::resize_as_sparse_csr_(out, self); sparse::impl::cuda::add_out_sparse_csr(self, other, Scalar(1), alpha, out); } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } TORCH_IMPL_FUNC(_convert_indices_from_csr_to_coo_structured_cuda) ( const Tensor& crow_indices, const Tensor& col_indices, const bool out_int32, const bool transpose, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int32_t>(result, crow_indices, col_indices, transpose); }); } else { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int64_t>(result, crow_indices, col_indices, transpose); }); } } /* Reductions on sparse CSR tensors using masked semantics. - To support a reduction operator on a CSR tensor with CUDA storage, define template <typename scalar_t> struct Reduction...Op { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a ... b; } __device__ __forceinline__ scalar_t identity() const { return ...; } __forceinline__ scalar_t identity_cpu() const { return ...; } }; Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ... result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, Reduction...Op<scalar_t>()); ... return result; } and add the following - func: _sparse_csr_op.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor dispatch: SparseCsrCUDA: _sparse_csr_..._cuda to native_functions.yaml */ namespace { template <typename scalar_t, typename index_t, typename ReductionOp> __global__ void reduce_sparse_csr_dim0_cuda_kernel(scalar_t* new_values, const index_t* new_col_indices, const int64_t new_nnz, const scalar_t* values, const index_t* col_indices, const int64_t nnz, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < new_nnz) { index_t col = new_col_indices[tid]; scalar_t v = rop.identity(); for (int64_t j=0; j < nnz; j++) { if (col == col_indices[j]) { v = rop(v, values[j]); } } new_values[tid] = v; } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim0_cuda_template(const Tensor& sparse, ReductionOp rop) { /* Consider the following sparse tensor: 1 * * * * * * * 2 * * * 3 * * * * * * * 4 * 5 * * that has CSR representation crow_indices = [0, 1, 2, 3, 3, 5] col_indices = [0, 3, 2, 0, 2] values = [1, 2, 3, 4, 5] Reduction with dim=0 results: rop(1,4) * rop(3,5) 2 * that has CSR representation new_crow_indices = [0, 3] new_col_indices = [0, 2, 3] new_values = [rop(1, 4], rop(3, 5), 2] In general, the CSR representation data can be computed as follows: nnz = col_indices.numel() new_col_indices = col_indices.unique(sorted=True, return_inverse=False) new_nnz = new_col_indices.numel() new_crow_indices = [0, new_nnz] new_values.resize(new_nnz) for i in range(new_nnz): v = identity col = new_col_indices[i] for j in range(nnz): if col == col_indices[j]: v = rop(v, values[j]) new_values[i] = v Notice this algorithm is different from the one used on CPU data. */ Tensor col_indices = sparse.col_indices(); Tensor values = sparse.values(); auto ncols = sparse.size(1); auto nnz = col_indices.numel(); Tensor new_col_indices; std::tie(new_col_indices, std::ignore) = at::_unique(col_indices, true, false); auto new_nnz = new_col_indices.numel(); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, new_nnz}, col_indices.options()); Tensor new_values = at::empty({new_nnz}, values.options()); scalar_t* values_ptr = values.data_ptr<scalar_t>(); scalar_t* new_values_ptr = new_values.data_ptr<scalar_t>(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (new_nnz + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_INDEX_TYPES(col_indices.scalar_type(), "reduce_sparse_csr_dim0_cuda_indices", [&]() { index_t* col_indices_ptr = col_indices.data_ptr<index_t>(); index_t* new_col_indices_ptr = new_col_indices.data_ptr<index_t>(); hipLaunchKernelGGL(( reduce_sparse_csr_dim0_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, new_values_ptr, new_col_indices_ptr, new_nnz, values_ptr, col_indices_ptr, nnz, rop ); }); C10_HIP_KERNEL_LAUNCH_CHECK(); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, ncols}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename index_t> __global__ void reduce_crow_indices_dim1_cuda_kernel(index_t* new_crow_indices, index_t* row_map, const index_t* crow_indices, const int64_t nrows ) { int64_t nnz = 0; new_crow_indices[0] = 0; for(int64_t i=0; i<nrows; i++) { if (crow_indices[i] != crow_indices[i + 1]) { row_map[i] = nnz; nnz++; } new_crow_indices[i + 1] = nnz; } } template <typename scalar_t, typename index_t, typename ReductionOp> __global__ void reduce_sparse_csr_dim1_cuda_kernel(scalar_t* new_values, const scalar_t* 
values, const index_t* crow_indices, const index_t* row_map, const int64_t nrows, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { index_t i_start = crow_indices[tid]; index_t i_end = crow_indices[tid+1]; if (i_start != i_end) { scalar_t acc = rop.identity(); for (index_t i = i_start; i < i_end; i++) { acc = rop(acc, values[i]); } new_values[row_map[tid]] = acc; } } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim1_cuda_template(const Tensor& sparse, ReductionOp rop) { /* The algorithm of computing reduce of a CSR tensor along the last dimension is explained in the comment of the reduce_sparse_csr_dim1_cpu_template function. */ Tensor crow_indices = sparse.crow_indices(); auto ioptions = crow_indices.options(); Tensor values = sparse.values(); auto nrows = sparse.size(0); auto numel = values.numel(); Tensor new_crow_indices = at::empty({crow_indices.numel()}, ioptions); Tensor new_col_indices = at::empty({}, ioptions); Tensor new_values = at::empty({}, values.options()); Tensor row_map = at::empty({nrows}, ioptions); at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; AT_DISPATCH_INDEX_TYPES(crow_indices.scalar_type(), "reduce_sparse_csr_dim1_cuda_indices", [&]() { index_t* crow_indices_ptr = crow_indices.data_ptr<index_t>(); index_t* new_crow_indices_ptr = new_crow_indices.data_ptr<index_t>(); index_t* row_map_ptr = row_map.data_ptr<index_t>(); hipLaunchKernelGGL(( reduce_crow_indices_dim1_cuda_kernel), dim3(1), dim3(1), 0, stream, new_crow_indices_ptr, row_map_ptr, crow_indices_ptr, nrows); C10_HIP_KERNEL_LAUNCH_CHECK(); index_t new_nnz = new_crow_indices[-1].item<index_t>(); new_col_indices.resize_(new_nnz); new_col_indices.fill_(index_t(0)); new_values.resize_(new_nnz); scalar_t* values_ptr = values.data_ptr<scalar_t>(); scalar_t* new_values_ptr = new_values.data_ptr<scalar_t>(); hipLaunchKernelGGL(( reduce_sparse_csr_dim1_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, new_values_ptr, values_ptr, crow_indices_ptr, row_map_ptr, nrows, rop); C10_HIP_KERNEL_LAUNCH_CHECK(); }); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {sparse.size(0), 1}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim01_cuda_template(const Tensor& sparse, ReductionOp rop) { auto ioptions = sparse.col_indices().options(); Tensor values = sparse.values(); auto numel = values.numel(); auto nnz = std::min<int64_t>(1, numel); Tensor new_values; if (numel > 0) { new_values = at::empty({1}, values.options()); auto iter = TensorIterator::reduce_op(new_values, values); gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t>(rop), rop.identity_cpu()); } else { new_values = at::empty({}, values.options()); } Tensor new_col_indices = at::zeros({nnz}, ioptions); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, nnz}, ioptions); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, std::min<int64_t>(1, sparse.size(1))}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, std::vector<int64_t> dims, ReductionOp rop) { if (dims.size() == 1) { if (dims[0] == 0) 
{ return reduce_sparse_csr_dim0_cuda_template<scalar_t>(sparse, rop); } else { TORCH_INTERNAL_ASSERT(dims[0] == 1); return reduce_sparse_csr_dim1_cuda_template<scalar_t>(sparse, rop); } } else if (dims.size() == 2) { TORCH_INTERNAL_ASSERT(((dims[0] == 0 && dims[1] == 1) || (dims[0] == 1 && dims[1] == 0))); return reduce_sparse_csr_dim01_cuda_template<scalar_t>(sparse, rop); } TORCH_INTERNAL_ASSERT(dims.size() == 0); // effective after gh-29137 has been resolved return sparse.clone(); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, IntArrayRef dims_to_sum, bool keepdim, ReductionOp rop) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); TORCH_CHECK(keepdim, "reduction operations on CSR tensors with keepdim=False is unsupported"); TORCH_INTERNAL_ASSERT(sparse.is_cuda()); const int64_t input_dim = sparse.dim(); TORCH_INTERNAL_ASSERT(input_dim == 2); auto dims = dims_to_sum.vec(); maybe_wrap_dims(dims, input_dim); if (dims.size() == 0) { // after gh-29137 is resolved, delete this if-block dims.emplace_back(0); dims.emplace_back(1); } return reduce_sparse_csr_cuda_template<scalar_t>(sparse, dims, rop); } template <typename scalar_t> struct ReductionAddOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a + b; } __device__ __forceinline__ scalar_t identity() const { return 0; } __forceinline__ scalar_t identity_cpu() const { return 0; } }; template <typename scalar_t> struct ReductionMulOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a * b; } __device__ __forceinline__ scalar_t identity() const { return 1; } __forceinline__ scalar_t identity_cpu() const { return 1; } }; } // namespace Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_sum_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, ReductionAddOp<scalar_t>()); }); return result; } Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_prod_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_reduce, keepdim, ReductionMulOp<scalar_t>()); }); return result; } } // namespace native } // namespace at
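As a concrete reference for what convert_indices_from_coo_to_csr_cuda_kernel computes, the sketch below is a host-side reimplementation of the same COO-to-CSR row-pointer conversion (it is an illustration, not code from the file above): for sorted COO row indices [0, 0, 1, 3] and size = 4 rows it yields crow_indices = [0, 2, 3, 3, 4].

#include <cstdint>
#include <vector>

// Host reference for the COO->CSR row-pointer conversion: row_indices must be
// sorted, size is the number of rows, and the result has size + 1 entries
// where result[i+1] - result[i] is the number of nonzeros in row i.
std::vector<int64_t> coo_to_csr_reference(const std::vector<int64_t>& row_indices,
                                          int64_t size) {
    std::vector<int64_t> crow(size + 1, 0);
    for (int64_t r : row_indices)
        crow[r + 1] += 1;                 // count nonzeros per row
    for (int64_t i = 0; i < size; ++i)
        crow[i + 1] += crow[i];           // prefix sum -> row pointers
    return crow;                          // e.g. [0,0,1,3] -> [0,2,3,3,4]
}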
18a64874b1758a29f79b354494bf353c01e4363f.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_convert_indices_from_coo_to_csr_native.h> #include <ATen/ops/_convert_indices_from_csr_to_coo_native.h> #include <ATen/ops/_sparse_csr_tensor_unsafe_native.h> #include <ATen/ops/_unique.h> #include <ATen/ops/add_native.h> #include <ATen/ops/resize_as_sparse_native.h> #include <ATen/ops/tensor.h> #include <ATen/ops/zeros.h> #endif #include <cuda_runtime.h> #include <type_traits> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <ATen/cuda/ThrustAllocator.h> #include <c10/cuda/CUDACachingAllocator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/sparse/cuda/SparseBlasImpl.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.h> #include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... 
int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); convert_indices_from_coo_to_csr_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(data_out, data_in, size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename input_t, typename output_t> __global__ void convert_indices_from_csr_to_coo_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t nrows) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { for (int64_t i = data_in[tid]; i < data_in[tid + 1]; i++) data_out[i] = static_cast<output_t>(tid); } } template <typename input_t, typename output_t> void convert_indices_from_csr_to_coo_cuda(const Tensor& indices, const Tensor& crow_indices, const Tensor& col_indices, const bool transpose=false) { int64_t nrows = crow_indices.numel() - 1; if (nrows == 0) { indices.zero_(); return; } auto crow_indices_ = crow_indices.expect_contiguous(); const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>(); TORCH_INTERNAL_ASSERT(indices.is_contiguous()); auto row0 = indices.select(0, transpose?1:0); auto row1 = indices.select(0, transpose?0:1); output_t* data_out = row0.data_ptr<output_t>(); // Run nrows threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); row1.copy_(*col_indices.expect_contiguous()); convert_indices_from_csr_to_coo_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(data_out, crow_indices_data_in, nrows); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. 
using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } if (src._nnz() == 0) { return output; } auto valuesBuffer = src_values.to(commonDtype).view({-1, src_values.size(-1)}); resultBuffer = resultBuffer.view({-1, output.size(-2), output.size(-1)}); auto src_crow_indices = src.crow_indices().view({-1, src.crow_indices().size(-1)}); auto src_col_indices = src.col_indices().view({-1, src.col_indices().size(-1)}); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( kComplexHalf, kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { auto batch_count = resultBuffer.dim() > 2 ? resultBuffer.size(-3) : 1; scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); auto out_strides0 = out_strides[0]; auto out_strides1 = out_strides[1]; auto crow_stride0 = src_crow_indices.stride(0); auto col_stride0 = src_col_indices.stride(0); auto val_stride0 = valuesBuffer.stride(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); at::cuda::ThrustAllocator allocator; auto policy = thrust::cuda::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(-1) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, cast_value, out_strides0, out_strides1, crow_stride0, col_stride0, val_stride0, batch_count ]__device__(int64_t irow) { for (index_t batch_idx = 0; batch_idx < batch_count; batch_idx++) { index_t start_index = crow_indices_accessor[batch_idx*crow_stride0 + irow]; index_t end_index = crow_indices_accessor[batch_idx*crow_stride0 + irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[batch_idx*col_stride0 + i]; auto index = batch_idx * out_strides0 + irow * out_strides1 + icol; out_ptr[index] += cast_value * values_accessor[batch_idx*val_stride0 + i]; } } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( self.sizes().equals(other.sizes()), "torch.add: Expected input tensors to have the same shape, but got tensor `self` with shape ", self.sizes(), " and tensor `other` with shape ", other.sizes()); at::native::resize_as_sparse_csr_(out, self); sparse::impl::cuda::add_out_sparse_csr(self, other, Scalar(1), alpha, out); } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } TORCH_IMPL_FUNC(_convert_indices_from_csr_to_coo_structured_cuda) ( const Tensor& crow_indices, const Tensor& col_indices, const bool out_int32, const bool transpose, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int32_t>(result, crow_indices, col_indices, transpose); }); } else { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int64_t>(result, crow_indices, col_indices, transpose); }); } } /* Reductions on sparse CSR tensors using masked semantics. - To support a reduction operator on a CSR tensor with CUDA storage, define template <typename scalar_t> struct Reduction...Op { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a ... b; } __device__ __forceinline__ scalar_t identity() const { return ...; } __forceinline__ scalar_t identity_cpu() const { return ...; } }; Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ... result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, Reduction...Op<scalar_t>()); ... return result; } and add the following - func: _sparse_csr_op.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor dispatch: SparseCsrCUDA: _sparse_csr_..._cuda to native_functions.yaml */ namespace { template <typename scalar_t, typename index_t, typename ReductionOp> __global__ void reduce_sparse_csr_dim0_cuda_kernel(scalar_t* new_values, const index_t* new_col_indices, const int64_t new_nnz, const scalar_t* values, const index_t* col_indices, const int64_t nnz, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < new_nnz) { index_t col = new_col_indices[tid]; scalar_t v = rop.identity(); for (int64_t j=0; j < nnz; j++) { if (col == col_indices[j]) { v = rop(v, values[j]); } } new_values[tid] = v; } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim0_cuda_template(const Tensor& sparse, ReductionOp rop) { /* Consider the following sparse tensor: 1 * * * * * * * 2 * * * 3 * * * * * * * 4 * 5 * * that has CSR representation crow_indices = [0, 1, 2, 3, 3, 5] col_indices = [0, 3, 2, 0, 2] values = [1, 2, 3, 4, 5] Reduction with dim=0 results: rop(1,4) * rop(3,5) 2 * that has CSR representation new_crow_indices = [0, 3] new_col_indices = [0, 2, 3] new_values = [rop(1, 4], rop(3, 5), 2] In general, the CSR representation data can be computed as follows: nnz = col_indices.numel() new_col_indices = col_indices.unique(sorted=True, return_inverse=False) new_nnz = new_col_indices.numel() new_crow_indices = [0, new_nnz] new_values.resize(new_nnz) for i in range(new_nnz): v = identity col = new_col_indices[i] for j in range(nnz): if col == col_indices[j]: v = rop(v, values[j]) new_values[i] = v Notice this algorithm is different from the one used on CPU data. */ Tensor col_indices = sparse.col_indices(); Tensor values = sparse.values(); auto ncols = sparse.size(1); auto nnz = col_indices.numel(); Tensor new_col_indices; std::tie(new_col_indices, std::ignore) = at::_unique(col_indices, true, false); auto new_nnz = new_col_indices.numel(); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, new_nnz}, col_indices.options()); Tensor new_values = at::empty({new_nnz}, values.options()); scalar_t* values_ptr = values.data_ptr<scalar_t>(); scalar_t* new_values_ptr = new_values.data_ptr<scalar_t>(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (new_nnz + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_INDEX_TYPES(col_indices.scalar_type(), "reduce_sparse_csr_dim0_cuda_indices", [&]() { index_t* col_indices_ptr = col_indices.data_ptr<index_t>(); index_t* new_col_indices_ptr = new_col_indices.data_ptr<index_t>(); reduce_sparse_csr_dim0_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(new_values_ptr, new_col_indices_ptr, new_nnz, values_ptr, col_indices_ptr, nnz, rop ); }); C10_CUDA_KERNEL_LAUNCH_CHECK(); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, ncols}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename index_t> __global__ void reduce_crow_indices_dim1_cuda_kernel(index_t* new_crow_indices, index_t* row_map, const index_t* crow_indices, const int64_t nrows ) { int64_t nnz = 0; new_crow_indices[0] = 0; for(int64_t i=0; i<nrows; i++) { if (crow_indices[i] != crow_indices[i + 1]) { row_map[i] = nnz; nnz++; } new_crow_indices[i + 1] = nnz; } } template <typename scalar_t, typename index_t, typename ReductionOp> __global__ void reduce_sparse_csr_dim1_cuda_kernel(scalar_t* new_values, const scalar_t* values, const index_t* crow_indices, const index_t* row_map, 
const int64_t nrows, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { index_t i_start = crow_indices[tid]; index_t i_end = crow_indices[tid+1]; if (i_start != i_end) { scalar_t acc = rop.identity(); for (index_t i = i_start; i < i_end; i++) { acc = rop(acc, values[i]); } new_values[row_map[tid]] = acc; } } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim1_cuda_template(const Tensor& sparse, ReductionOp rop) { /* The algorithm of computing reduce of a CSR tensor along the last dimension is explained in the comment of the reduce_sparse_csr_dim1_cpu_template function. */ Tensor crow_indices = sparse.crow_indices(); auto ioptions = crow_indices.options(); Tensor values = sparse.values(); auto nrows = sparse.size(0); auto numel = values.numel(); Tensor new_crow_indices = at::empty({crow_indices.numel()}, ioptions); Tensor new_col_indices = at::empty({}, ioptions); Tensor new_values = at::empty({}, values.options()); Tensor row_map = at::empty({nrows}, ioptions); at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; AT_DISPATCH_INDEX_TYPES(crow_indices.scalar_type(), "reduce_sparse_csr_dim1_cuda_indices", [&]() { index_t* crow_indices_ptr = crow_indices.data_ptr<index_t>(); index_t* new_crow_indices_ptr = new_crow_indices.data_ptr<index_t>(); index_t* row_map_ptr = row_map.data_ptr<index_t>(); reduce_crow_indices_dim1_cuda_kernel<<<1, 1, 0, stream>>>(new_crow_indices_ptr, row_map_ptr, crow_indices_ptr, nrows); C10_CUDA_KERNEL_LAUNCH_CHECK(); index_t new_nnz = new_crow_indices[-1].item<index_t>(); new_col_indices.resize_(new_nnz); new_col_indices.fill_(index_t(0)); new_values.resize_(new_nnz); scalar_t* values_ptr = values.data_ptr<scalar_t>(); scalar_t* new_values_ptr = new_values.data_ptr<scalar_t>(); reduce_sparse_csr_dim1_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(new_values_ptr, values_ptr, crow_indices_ptr, row_map_ptr, nrows, rop); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {sparse.size(0), 1}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim01_cuda_template(const Tensor& sparse, ReductionOp rop) { auto ioptions = sparse.col_indices().options(); Tensor values = sparse.values(); auto numel = values.numel(); auto nnz = std::min<int64_t>(1, numel); Tensor new_values; if (numel > 0) { new_values = at::empty({1}, values.options()); auto iter = TensorIterator::reduce_op(new_values, values); gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t>(rop), rop.identity_cpu()); } else { new_values = at::empty({}, values.options()); } Tensor new_col_indices = at::zeros({nnz}, ioptions); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, nnz}, ioptions); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, std::min<int64_t>(1, sparse.size(1))}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, std::vector<int64_t> dims, ReductionOp rop) { if (dims.size() == 1) { if (dims[0] == 0) { return reduce_sparse_csr_dim0_cuda_template<scalar_t>(sparse, rop); } else { TORCH_INTERNAL_ASSERT(dims[0] == 1); return 
reduce_sparse_csr_dim1_cuda_template<scalar_t>(sparse, rop); } } else if (dims.size() == 2) { TORCH_INTERNAL_ASSERT(((dims[0] == 0 && dims[1] == 1) || (dims[0] == 1 && dims[1] == 0))); return reduce_sparse_csr_dim01_cuda_template<scalar_t>(sparse, rop); } TORCH_INTERNAL_ASSERT(dims.size() == 0); // effective after gh-29137 has been resolved return sparse.clone(); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, IntArrayRef dims_to_sum, bool keepdim, ReductionOp rop) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); TORCH_CHECK(keepdim, "reduction operations on CSR tensors with keepdim=False is unsupported"); TORCH_INTERNAL_ASSERT(sparse.is_cuda()); const int64_t input_dim = sparse.dim(); TORCH_INTERNAL_ASSERT(input_dim == 2); auto dims = dims_to_sum.vec(); maybe_wrap_dims(dims, input_dim); if (dims.size() == 0) { // after gh-29137 is resolved, delete this if-block dims.emplace_back(0); dims.emplace_back(1); } return reduce_sparse_csr_cuda_template<scalar_t>(sparse, dims, rop); } template <typename scalar_t> struct ReductionAddOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a + b; } __device__ __forceinline__ scalar_t identity() const { return 0; } __forceinline__ scalar_t identity_cpu() const { return 0; } }; template <typename scalar_t> struct ReductionMulOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a * b; } __device__ __forceinline__ scalar_t identity() const { return 1; } __forceinline__ scalar_t identity_cpu() const { return 1; } }; } // namespace Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_sum_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, ReductionAddOp<scalar_t>()); }); return result; } Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_prod_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_reduce, keepdim, ReductionMulOp<scalar_t>()); }); return result; } } // namespace native } // namespace at
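For orientation, a minimal host-side sketch of how these kernels are reached once the native_functions.yaml entry quoted in the comment block above is registered. The generated name at::_sparse_csr_sum and its exact C++ signature are inferred from the _sparse_csr_sum_cuda kernel name and the quoted schema, so treat them as assumptions rather than a confirmed API.

// Hedged usage sketch; op name and signature assumed from the schema quoted above.
#include <ATen/ATen.h>

at::Tensor csr_row_sums(const at::Tensor& csr) {
  // keepdim must remain true: reduce_sparse_csr_cuda_template above
  // rejects keepdim=false for CSR inputs.
  return at::_sparse_csr_sum(csr, /*dim=*/{1}, /*keepdim=*/true, /*dtype=*/c10::nullopt);
}

With dim={1} the result is a {nrows, 1} CSR tensor whose stored values hold per-row sums over the stored elements only (masked semantics); rows without stored elements contribute no output entry, matching reduce_sparse_csr_dim1_cuda_template above.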
53ffc7bef7ae4c47df52e1defaf68fad11289c92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "intersection.h" #include "optimization.h" #include "mirrored_memory.h" #include "kernel_common.h" namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_selfIntersectionCount(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } } template <bool dbgErr> __global__ void gpu_normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * potentialIntersection, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } // if (dbgErr) { debugError[index] = NAN; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> & dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[threadIdx.x*(dims-6)]; // dims-6 because self-intersection doesn't depend on global transform const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(J,v_m,srcFrame,dstSdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); float * JTr = result; float * JTJ = &result[dims-6]; float * e = &result[dims-6 + JTJSize(dims-6)]; computeSquaredLossResult(dims-6,residual,J,e,JTr, JTJ); if (dbgErr) { debugError[index] = residual*residual; } return; } } } } } __global__ void gpu_normEqnsSelfIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, 
const float * dtheta_dalpha, const int * potentialIntersection, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); // reduction doPoseGradientReductionArticulationOnly(J,de_dtheta,dtheta_dalpha,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void gpu_normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * dMapping, const int * potentialIntersection, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); doParamMappingArticulationOnly(J,de_dtheta,dMapping,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; 
computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void gpu_initDebugIntersectionError(float * debugError, const int nSites) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } debugError[index] = NAN; } __global__ void gpu_intersectionCount(const float4 * testSites, const int nSites, const SE3 T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } template <bool dbgErr> __global__ void gpu_normEquationsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[tid*dims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(J,v_src_m,srcFrame,dstSdfGrad_src_m,dims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); float * JTr = result; float * JTJ = &result[dims]; float * e = &result[dims + JTJSize(dims)]; computeSquaredLossResult(dims,residual,J,e,JTr,JTJ); if (dbgErr) { debugError[index] += (residual*residual); } return; } } } } __global__ void gpu_normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, 
const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const float * dtheta_dalpha_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doPoseGradientReduction(J,de_dtheta,dtheta_dalpha_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const int * dMapping_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m 
= SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doParamMapping(J,de_dtheta,dMapping_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_o = testSites[index]; v_o.w = 1; const float4 v_h = T_ho*v_o; for (int hGrid=0; hGrid<nSdfs_h; ++hGrid) { const int hFrame = sdfFrames_h[hGrid]; const float4 v_f = T_fms_h[hFrame]*v_h; const Grid3D<float> &hSdf = sdfs_h[hGrid]; const float3 v_g = hSdf.getGridCoords(make_float3(v_f)); if (hSdf.isInBoundsGradientInterp(v_g)) { const float d = hSdf.getValueInterpolated(v_g)*hSdf.resolution; if (d < 0) { // collision detected float * J = &s[tid*12]; const float3 hSdfGrad_f = hSdf.getGradientInterpolated(v_g); const float3 hSdfGrad_h = SE3Rotate(T_mfs_h[hFrame],hSdfGrad_f); const float3 hSdfGrad_o = SE3Rotate(T_oh,hSdfGrad_h); // hand derivative J[0] = dot(hSdfGrad_h,make_float3(-1, 0, 0)); J[1] = dot(hSdfGrad_h,make_float3( 0,-1, 0)); J[2] = dot(hSdfGrad_h,make_float3( 0, 0,-1)); J[3] = dot(hSdfGrad_h,make_float3( 0, v_h.z,-v_h.y)); J[4] = dot(hSdfGrad_h,make_float3(-v_h.z, 0, v_h.x)); J[5] = dot(hSdfGrad_h,make_float3( v_h.y,-v_h.x, 0)); // object derivative J[6] = dot(hSdfGrad_o,make_float3(-1, 0, 0)); J[7] = dot(hSdfGrad_o,make_float3( 0,-1, 0)); J[8] = dot(hSdfGrad_o,make_float3( 0, 0,-1)); J[9] = dot(hSdfGrad_o,make_float3( 0, v_o.z,-v_o.y)); J[10] = dot(hSdfGrad_o,make_float3(-v_o.z, 0, v_o.x)); J[11] = dot(hSdfGrad_o,make_float3( v_o.y,-v_o.x, 0)); float * eJ = result; float * JTJ = &result[12]; float * e = &result[12 + JTJSize(12)]; for (int i=0; i<12; ++i) { if (J[i] == 0.0f) { continue; } float eJval = -d*-J[i]; atomicAdd(&eJ[i],eJval); for (int j=0; j<=i; ++j) { float JTJval = J[i]*J[j]; atomicAdd(&JTJ[((i*(i+1))>>1) + j],JTJval); } } atomicAdd(e,d*d); return; } } } } __global__ void gpu_getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src = testSites[index]; v_src.w = 1.0; float4 v_dst = T_ds*v_src; const float3 v_dst_g = sdf_dst->getGridCoords(make_float3(v_dst)); if (!sdf_dst->isInBoundsGradientInterp(v_dst_g)) { distances[index] = 1e20; // printf("%f ",sdf_dst->resolution); } else { distances[index] = sdf_dst->getValueInterpolated(v_dst_g)*sdf_dst->resolution; } } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- int countSelfIntersections(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); hipMemset(nCollisions.devicePtr(),0,sizeof(int)); hipLaunchKernelGGL(( gpu_selfIntersectionCount), dim3(grid),dim3(block), 0, 0, 
testSites,nSites,T_mfs,T_fms, sdfFrames,sdfs,nSdfs,potentialIntersection, nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } int countIntersections(const float4 * testSites, const int nSites, const SE3 & T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); hipMemset(nCollisions.devicePtr(),0,sizeof(int)); hipLaunchKernelGGL(( gpu_intersectionCount), dim3(grid),dim3(block), 0, 0, testSites,nSites,T_ds,T_mfs_src,sdfFrames_src, T_fms_dst,sdfFrames_dst,sdfs_dst,nSdfs_dst, nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } void normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const MirroredModel & model, const int * potentialIntersection, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((dims-6)+JTJSize(dims-6)+1)*sizeof(float)); if (debugError == 0) { hipLaunchKernelGGL(( gpu_normEqnsSelfIntersection<false>), dim3(grid),dim3(block),64*(dims-6)*sizeof(float), 0, testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } else { hipLaunchKernelGGL(( gpu_normEqnsSelfIntersection<true>), dim3(grid),dim3(block),64*(dims-6)*sizeof(float), 0, testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } } void normEqnsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((dims)+JTJSize(dims)+1)*sizeof(float)); if (debugError == 0) { hipLaunchKernelGGL(( gpu_normEquationsIntersection<false>), dim3(grid),dim3(block),64*(dims)*sizeof(float), 0, testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } else { hipLaunchKernelGGL(( gpu_normEquationsIntersection<true>), dim3(grid),dim3(block),64*(dims)*sizeof(float), 0, testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } } void normEqnsSelfIntersectionReduced(const float4 * 
testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const float * dtheta_dalpha, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); hipLaunchKernelGGL(( gpu_normEqnsSelfIntersectionReduced), dim3(grid),dim3(block),64*(fullDims-6+redDims-6)*sizeof(float), 0, testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dtheta_dalpha, potentialIntersection, result); } void normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const int * dMapping, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); hipLaunchKernelGGL(( gpu_normEqnsSelfIntersectionParamMap), dim3(grid),dim3(block),64*(fullDims-6+redDims-6)*sizeof(float), 0, testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, potentialIntersection, result); } void normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const float * dtheta_dalpha_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); hipLaunchKernelGGL(( gpu_normEqnsIntersectionReduced), dim3(grid),dim3(block),64*(fullDims+redDims)*sizeof(float), 0, testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dtheta_dalpha_src, result); } void normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const int * dMapping_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); hipLaunchKernelGGL(( gpu_normEqnsIntersectionParamMap), dim3(grid),dim3(block),64*(fullDims+redDims)*sizeof(float), 0, testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dMapping_src, result); } void 
intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipMemset(result,0,(12+JTJSize(12)+1)*sizeof(float)); hipLaunchKernelGGL(( gpu_intersectionCheckRigidObjInHand), dim3(grid),dim3(block),64*12*sizeof(float), 0, testSites, nSites, T_ho, T_oh, T_mfs_h, T_fms_h, sdfFrames_h, sdfs_h, nSdfs_h, result); } void getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances, const hipStream_t stream) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipLaunchKernelGGL(( gpu_getDistanceToSdf), dim3(grid),dim3(block),0,stream, testSites,nSites,T_ds,sdf_dst,distances); } void initDebugIntersectionError(float * debugError, const int nSites) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); hipLaunchKernelGGL(( gpu_initDebugIntersectionError), dim3(grid),dim3(block), 0, 0, debugError, nSites); } }
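The .cu original of this intersection file follows; the two versions differ only by the mechanical source-to-source rewrites hipify applies (the added hip/hip_runtime.h include, cudaMemset rewritten to hipMemset, CUDA stream types to their HIP counterparts, and triple-chevron launches rewritten to hipLaunchKernelGGL). A small self-contained sketch of that launch mapping, using an invented fill_kernel purely for illustration:

// Hedged illustration only; fill_kernel and launch_fill are made-up names.
#include <hip/hip_runtime.h>

__global__ void fill_kernel(int* out, int value, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { out[i] = value; }
}

void launch_fill(int* d_out, int value, int n) {
    dim3 block(64, 1, 1);
    dim3 grid((n + block.x - 1) / block.x, 1, 1);
    hipMemset(d_out, 0, n * sizeof(int));      // cudaMemset(...) in the .cu form
    // CUDA form:  fill_kernel<<<grid, block>>>(d_out, value, n);
    // HIP form emitted by hipify:
    hipLaunchKernelGGL(fill_kernel, grid, block, 0 /*sharedMem*/, 0 /*stream*/, d_out, value, n);
}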
53ffc7bef7ae4c47df52e1defaf68fad11289c92.cu
#include "intersection.h" #include "optimization.h" #include "mirrored_memory.h" #include "kernel_common.h" namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_selfIntersectionCount(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } } template <bool dbgErr> __global__ void gpu_normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * potentialIntersection, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } // if (dbgErr) { debugError[index] = NAN; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> & dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[threadIdx.x*(dims-6)]; // dims-6 because self-intersection doesn't depend on global transform const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(J,v_m,srcFrame,dstSdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); float * JTr = result; float * JTJ = &result[dims-6]; float * e = &result[dims-6 + JTJSize(dims-6)]; computeSquaredLossResult(dims-6,residual,J,e,JTr, JTJ); if (dbgErr) { debugError[index] = residual*residual; } return; } } } } } __global__ void gpu_normEqnsSelfIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const float * dtheta_dalpha, const int * potentialIntersection, float * result) { extern 
__shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); // reduction doPoseGradientReductionArticulationOnly(J,de_dtheta,dtheta_dalpha,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void gpu_normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * dMapping, const int * potentialIntersection, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); doParamMappingArticulationOnly(J,de_dtheta,dMapping,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void 
gpu_initDebugIntersectionError(float * debugError, const int nSites) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } debugError[index] = NAN; } __global__ void gpu_intersectionCount(const float4 * testSites, const int nSites, const SE3 T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } template <bool dbgErr> __global__ void gpu_normEquationsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[tid*dims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(J,v_src_m,srcFrame,dstSdfGrad_src_m,dims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); float * JTr = result; float * JTJ = &result[dims]; float * e = &result[dims + JTJSize(dims)]; computeSquaredLossResult(dims,residual,J,e,JTr,JTJ); if (dbgErr) { debugError[index] += (residual*residual); } return; } } } } __global__ void gpu_normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, 
const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const float * dtheta_dalpha_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doPoseGradientReduction(J,de_dtheta,dtheta_dalpha_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const int * dMapping_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); 
getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doParamMapping(J,de_dtheta,dMapping_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_o = testSites[index]; v_o.w = 1; const float4 v_h = T_ho*v_o; for (int hGrid=0; hGrid<nSdfs_h; ++hGrid) { const int hFrame = sdfFrames_h[hGrid]; const float4 v_f = T_fms_h[hFrame]*v_h; const Grid3D<float> &hSdf = sdfs_h[hGrid]; const float3 v_g = hSdf.getGridCoords(make_float3(v_f)); if (hSdf.isInBoundsGradientInterp(v_g)) { const float d = hSdf.getValueInterpolated(v_g)*hSdf.resolution; if (d < 0) { // collision detected float * J = &s[tid*12]; const float3 hSdfGrad_f = hSdf.getGradientInterpolated(v_g); const float3 hSdfGrad_h = SE3Rotate(T_mfs_h[hFrame],hSdfGrad_f); const float3 hSdfGrad_o = SE3Rotate(T_oh,hSdfGrad_h); // hand derivative J[0] = dot(hSdfGrad_h,make_float3(-1, 0, 0)); J[1] = dot(hSdfGrad_h,make_float3( 0,-1, 0)); J[2] = dot(hSdfGrad_h,make_float3( 0, 0,-1)); J[3] = dot(hSdfGrad_h,make_float3( 0, v_h.z,-v_h.y)); J[4] = dot(hSdfGrad_h,make_float3(-v_h.z, 0, v_h.x)); J[5] = dot(hSdfGrad_h,make_float3( v_h.y,-v_h.x, 0)); // object derivative J[6] = dot(hSdfGrad_o,make_float3(-1, 0, 0)); J[7] = dot(hSdfGrad_o,make_float3( 0,-1, 0)); J[8] = dot(hSdfGrad_o,make_float3( 0, 0,-1)); J[9] = dot(hSdfGrad_o,make_float3( 0, v_o.z,-v_o.y)); J[10] = dot(hSdfGrad_o,make_float3(-v_o.z, 0, v_o.x)); J[11] = dot(hSdfGrad_o,make_float3( v_o.y,-v_o.x, 0)); float * eJ = result; float * JTJ = &result[12]; float * e = &result[12 + JTJSize(12)]; for (int i=0; i<12; ++i) { if (J[i] == 0.0f) { continue; } float eJval = -d*-J[i]; atomicAdd(&eJ[i],eJval); for (int j=0; j<=i; ++j) { float JTJval = J[i]*J[j]; atomicAdd(&JTJ[((i*(i+1))>>1) + j],JTJval); } } atomicAdd(e,d*d); return; } } } } __global__ void gpu_getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src = testSites[index]; v_src.w = 1.0; float4 v_dst = T_ds*v_src; const float3 v_dst_g = sdf_dst->getGridCoords(make_float3(v_dst)); if (!sdf_dst->isInBoundsGradientInterp(v_dst_g)) { distances[index] = 1e20; // printf("%f ",sdf_dst->resolution); } else { distances[index] = sdf_dst->getValueInterpolated(v_dst_g)*sdf_dst->resolution; } } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- int countSelfIntersections(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); cudaMemset(nCollisions.devicePtr(),0,sizeof(int)); gpu_selfIntersectionCount<<<grid,block>>>(testSites,nSites,T_mfs,T_fms, sdfFrames,sdfs,nSdfs,potentialIntersection, 
nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } int countIntersections(const float4 * testSites, const int nSites, const SE3 & T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); cudaMemset(nCollisions.devicePtr(),0,sizeof(int)); gpu_intersectionCount<<<grid,block>>>(testSites,nSites,T_ds,T_mfs_src,sdfFrames_src, T_fms_dst,sdfFrames_dst,sdfs_dst,nSdfs_dst, nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } void normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const MirroredModel & model, const int * potentialIntersection, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((dims-6)+JTJSize(dims-6)+1)*sizeof(float)); if (debugError == 0) { gpu_normEqnsSelfIntersection<false><<<grid,block,64*(dims-6)*sizeof(float)>>>(testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } else { gpu_normEqnsSelfIntersection<true><<<grid,block,64*(dims-6)*sizeof(float)>>>(testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } } void normEqnsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((dims)+JTJSize(dims)+1)*sizeof(float)); if (debugError == 0) { gpu_normEquationsIntersection<false><<<grid,block,64*(dims)*sizeof(float)>>>(testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } else { gpu_normEquationsIntersection<true><<<grid,block,64*(dims)*sizeof(float)>>>(testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } } void normEqnsSelfIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const float * dtheta_dalpha, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); 
cudaMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); gpu_normEqnsSelfIntersectionReduced<<<grid,block,64*(fullDims-6+redDims-6)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dtheta_dalpha, potentialIntersection, result); } void normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const int * dMapping, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); gpu_normEqnsSelfIntersectionParamMap<<<grid,block,64*(fullDims-6+redDims-6)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, potentialIntersection, result); } void normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const float * dtheta_dalpha_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); gpu_normEqnsIntersectionReduced<<<grid,block,64*(fullDims+redDims)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dtheta_dalpha_src, result); } void normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const int * dMapping_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); gpu_normEqnsIntersectionParamMap<<<grid,block,64*(fullDims+redDims)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dMapping_src, result); } void intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,(12+JTJSize(12)+1)*sizeof(float)); 
gpu_intersectionCheckRigidObjInHand<<<grid,block,64*12*sizeof(float)>>>(testSites, nSites, T_ho, T_oh, T_mfs_h, T_fms_h, sdfFrames_h, sdfs_h, nSdfs_h, result); } void getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances, const cudaStream_t stream) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); gpu_getDistanceToSdf<<<grid,block,0,stream>>>(testSites,nSites,T_ds,sdf_dst,distances); } void initDebugIntersectionError(float * debugError, const int nSites) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); gpu_initDebugIntersectionError<<<grid,block>>>(debugError, nSites); } }
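// --- editor's illustrative sketch (not part of the original sources) ----------------
// A minimal, hedged re-statement of the reduction pattern used by the intersection
// kernels above: each colliding site adds its 1 x dims Jacobian row J and residual d
// into a packed result buffer [ J^T r | upper-triangular J^T J | sum of d^2 ] with
// atomicAdd, exactly as gpu_intersectionCheckRigidObjInHand does for dims == 12 and as
// computeSquaredLossResult is called for the templated kernels. The standalone function
// name below is an assumption for illustration only; sign conventions follow the code above.
__device__ void accumulateNormalEqs(int dims, float d, const float *J, float *result)
{
    float *JTr = result;                                   // dims entries
    float *JTJ = result + dims;                            // dims*(dims+1)/2 entries (JTJSize(dims))
    float *e   = result + dims + (dims * (dims + 1)) / 2;  // 1 entry: accumulated squared error
    for (int i = 0; i < dims; ++i) {
        if (J[i] == 0.0f) continue;                        // skip zero columns, saves atomics
        atomicAdd(&JTr[i], -d * -J[i]);                    // same sign convention as eJval above
        for (int j = 0; j <= i; ++j)                       // row-packed upper triangle of J^T J
            atomicAdd(&JTJ[((i * (i + 1)) >> 1) + j], J[i] * J[j]);
    }
    atomicAdd(e, d * d);
}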
0fe77c5fa7680e513a4783acdb6a600231b02083.hip
// !!! This is a file automatically generated by hipify!!! #include "WTGPUChunk.cuh" WTGPUChunkData::WTGPUChunkData(int argGPUId, int argWordLength, int argMaxChunkWTLength, int argWTLength, int argNumOfWordS) { GPUId = argGPUId; wordLength = argWordLength; maxChunkWTLength = argMaxChunkWTLength; WTLength = argWTLength; numOfWordS = argNumOfWordS; } void WTGPUChunkData::GPUMemAllocate(int argGPUId) { GPUId = argGPUId; hipSetDevice(argGPUId); hipMalloc((void**)&deviceNZWTCount, (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceWTIndex, (maxWTLength) * sizeof(int)); hipMalloc((void**)&deviceWTValue, (maxWTLength) * sizeof(int)); hipMalloc((void**)&deviceWTCount, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceWTOffset, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceWTRowSum, (K) * sizeof(int)); hipMalloc((void**)&deviceChunkWTCount, (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkWTOffset, (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkNZWTCount, (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkWTIndex, (maxChunkWTLength) * sizeof(int)); hipMalloc((void**)&deviceChunkWTValue, (maxChunkWTLength) * sizeof(int)); } void WTGPUChunkData::GPUMemset(int argGPUId) { hipSetDevice(argGPUId); hipMemset(deviceNZWTCount, 0, (numOfWordS) * sizeof(int)); hipMemset(deviceWTIndex, 0, (maxWTLength) * sizeof(int)); hipMemset(deviceWTValue, 0, (maxWTLength) * sizeof(int)); hipMemset(deviceWTRowSum, 0, (K) * sizeof(int)); } void WTGPUChunkData::chunkGPUMemset(int argGPUId) { hipSetDevice(argGPUId); hipMemset(deviceChunkNZWTCount, 0, (numOfWordS) * sizeof(int)); hipMemset(deviceChunkWTIndex, 0, (maxChunkWTLength) * sizeof(int)); hipMemset(deviceChunkWTValue, 0, (maxChunkWTLength) * sizeof(int)); //hipMemset(deviceWTRowSum, 0, (K) * sizeof(int)); }
0fe77c5fa7680e513a4783acdb6a600231b02083.cu
#include "WTGPUChunk.cuh" WTGPUChunkData::WTGPUChunkData(int argGPUId, int argWordLength, int argMaxChunkWTLength, int argWTLength, int argNumOfWordS) { GPUId = argGPUId; wordLength = argWordLength; maxChunkWTLength = argMaxChunkWTLength; WTLength = argWTLength; numOfWordS = argNumOfWordS; } void WTGPUChunkData::GPUMemAllocate(int argGPUId) { GPUId = argGPUId; cudaSetDevice(argGPUId); cudaMalloc((void**)&deviceNZWTCount, (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceWTIndex, (maxWTLength) * sizeof(int)); cudaMalloc((void**)&deviceWTValue, (maxWTLength) * sizeof(int)); cudaMalloc((void**)&deviceWTCount, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceWTOffset, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceWTRowSum, (K) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTCount, (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTOffset, (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkNZWTCount, (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTIndex, (maxChunkWTLength) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTValue, (maxChunkWTLength) * sizeof(int)); } void WTGPUChunkData::GPUMemset(int argGPUId) { cudaSetDevice(argGPUId); cudaMemset(deviceNZWTCount, 0, (numOfWordS) * sizeof(int)); cudaMemset(deviceWTIndex, 0, (maxWTLength) * sizeof(int)); cudaMemset(deviceWTValue, 0, (maxWTLength) * sizeof(int)); cudaMemset(deviceWTRowSum, 0, (K) * sizeof(int)); } void WTGPUChunkData::chunkGPUMemset(int argGPUId) { cudaSetDevice(argGPUId); cudaMemset(deviceChunkNZWTCount, 0, (numOfWordS) * sizeof(int)); cudaMemset(deviceChunkWTIndex, 0, (maxChunkWTLength) * sizeof(int)); cudaMemset(deviceChunkWTValue, 0, (maxChunkWTLength) * sizeof(int)); //cudaMemset(deviceWTRowSum, 0, (K) * sizeof(int)); }
e8f27d68b4211b8530df42fc9cf6d3bd1973a233.hip
// !!! This is a file automatically generated by hipify!!! //#include "device.h" //#include <cutil.h> //#include <cutil_math.h> //#include <cutil_inline_runtime.h> //#include <cutil.h> //#include <cutil_math.h> //#include <cutil_inline_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> __global__ void g_draw(hipSurfaceObject_t sf,float time,unsigned int width,unsigned height){ unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // g_image[((y*width+x)*4+0)*sizeof(unsigned char)]= 125;//(int)(time*60)%255; // g_image[((y*width+x)*4+1)*sizeof(unsigned char)]= 255;//255*x/width; // g_image[((y*width+x)*4+2)*sizeof(unsigned char)]= 125;//255-255*x/width; // g_image[((y*width+x)*4+3)*sizeof(unsigned char)]= 105;//0; float4 data = make_float4(.0f, .6f, .8f, 1.0f); surf2Dwrite(data, sf, x * sizeof(float4), y); } extern "C" void mydraw(hipSurfaceObject_t sf, float time,unsigned int width, unsigned height){ dim3 dimBlock(32,32,1); dim3 dimGrid(width/dimBlock.x,height/dimBlock.y,1); hipLaunchKernelGGL(( g_draw), dim3(dimGrid),dim3(dimBlock), 0, 0, sf,time,width,height); //cutilSafeCall( hipDeviceSynchronize() ); } __global__ void g_setKernel(hipSurfaceObject_t outputSurfObj,int width, int height){ // Calculate surface coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { uchar4 data; // Read from input surface data=make_uchar4(32,64,128,255); // Write to output surface surf2Dwrite(data, outputSurfObj, x * 4, y); } } extern "C" void setKernel(hipSurfaceObject_t outputSurfObj,int width, int height){ dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,(height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( g_setKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, outputSurfObj, width, height); }
e8f27d68b4211b8530df42fc9cf6d3bd1973a233.cu
//#include "device.h" //#include <cutil.h> //#include <cutil_math.h> //#include <cutil_inline_runtime.h> //#include <cutil.h> //#include <cutil_math.h> //#include <cutil_inline_runtime.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda.h> #include <curand.h> __global__ void g_draw(cudaSurfaceObject_t sf,float time,unsigned int width,unsigned height){ unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // g_image[((y*width+x)*4+0)*sizeof(unsigned char)]= 125;//(int)(time*60)%255; // g_image[((y*width+x)*4+1)*sizeof(unsigned char)]= 255;//255*x/width; // g_image[((y*width+x)*4+2)*sizeof(unsigned char)]= 125;//255-255*x/width; // g_image[((y*width+x)*4+3)*sizeof(unsigned char)]= 105;//0; float4 data = make_float4(.0f, .6f, .8f, 1.0f); surf2Dwrite(data, sf, x * sizeof(float4), y); } extern "C" void mydraw(cudaSurfaceObject_t sf, float time,unsigned int width, unsigned height){ dim3 dimBlock(32,32,1); dim3 dimGrid(width/dimBlock.x,height/dimBlock.y,1); g_draw<<<dimGrid,dimBlock>>>(sf,time,width,height); //cutilSafeCall( cudaThreadSynchronize() ); } __global__ void g_setKernel(cudaSurfaceObject_t outputSurfObj,int width, int height){ // Calculate surface coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { uchar4 data; // Read from input surface data=make_uchar4(32,64,128,255); // Write to output surface surf2Dwrite(data, outputSurfObj, x * 4, y); } } extern "C" void setKernel(cudaSurfaceObject_t outputSurfObj,int width, int height){ dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x,(height + dimBlock.y - 1) / dimBlock.y); g_setKernel<<<dimGrid, dimBlock>>>(outputSurfObj, width, height); }
7aa318ac6375e5e5ae1211d74f049d9f1bb0c97a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************* * Copyright (c) 2017, Palo Alto Research Center. * * All rights reserved. * *************************************************************/ #include "hml_quick_sort.h" #include "hml_triangle_count.h" #include "hml_file_utils.h" #define cHmlBlocksPerGrid 128 #define cHmlThreadsPerBlock 128 #define cHmlTriangleCountPartitionSizeInit 1024 #define cHmlTriangleCountMaxNumPartitions 32 #define cHmlTriangleCountLinearSearchMaxEdges 4096 #define cHmlTriangleCountNumBlocks 256 typedef struct { uint32_t *P; /* vertex permutation array */ } HmlTriangleCountGraphInsertState; /* partition 'graph' into 'numPartitions' s.t. for all vertices * v in partition p, the following property holds: * minOutDeg[p] <= out-degree(v) < minOutDeg[p + 1], * except for the last partition p = numParititions - 1, for which * it holds: * minOutDeg[numPartitions - 1] <= out-degree(v) < infinity * Thus, minOutDeg[] has 'numPartitions' elements. * The output is stored in vertexRank, an array of size * (graph->maxSrcVertex - graph->minSrcVertex + 1) elements. * vertexRank[] must be allocated by the caller of this function. * vertexRank[r] stores the id of vertex v in partition p s.t. * vertexRank[r] == v && partitionPrefixSize[p - 1] <= r && * r < partitionPrefixSize[p], except for the first partition p = 0, where * 0 <= r < partitionPrefixSize[0] * The actual size of vertexRank is given by: * partitionPrefixSize[numPartitions - 1], which should never exceed * numSrcVertices (see below). It's the caller's responsibility to * resize vertexRank afterwards to free its unused portion. */ void hmlTriangleCountPartitionVertexByOutDeg(HmlGraphCore *core, uint32_t *minOutDeg, uint32_t numPartitions, uint32_t *vertexRank, uint32_t *partitionPrefixSize) { uint32_t **partitions; uint32_t p; uint32_t v; uint32_t outDeg; uint32_t *R = core->R; uint32_t *pPartitionAllocSize; /* allocation size */ uint32_t **partitionPtr; uint32_t **partitionEndPtr; /* actual used size */ uint32_t numSrcVertices = core->maxSrcVertex - core->minSrcVertex + 1; uint32_t prefixSize = 0; MALLOC(partitions, uint32_t *, numPartitions); MALLOC(partitionPtr, uint32_t *, numPartitions); MALLOC(partitionEndPtr, uint32_t *, numPartitions); MALLOC(pPartitionAllocSize, uint32_t, numPartitions); for(p = 0; p < numPartitions; ++p) { MALLOC(partitions[p], uint32_t, cHmlTriangleCountPartitionSizeInit); pPartitionAllocSize[p] = cHmlTriangleCountPartitionSizeInit; partitionPtr[p] = partitions[p]; partitionEndPtr[p] = partitions[p] + cHmlTriangleCountPartitionSizeInit; } for(v = core->minSrcVertex; v <= core->maxSrcVertex; ++v) { outDeg = R[v + 1] - R[v]; /* each page takes one 32-bit word */ /* use linear scan to find which partition this vertex belongs to */ for(p = 0; p < numPartitions && minOutDeg[p] <= outDeg; ++p); if(p > 0) { --p; if(partitionPtr[p] == partitionEndPtr[p]) { REALLOC(partitions[p], uint32_t, pPartitionAllocSize[p] * 2); partitionPtr[p] = partitions[p] + pPartitionAllocSize[p]; pPartitionAllocSize[p] *= 2; partitionEndPtr[p] = partitions[p] + pPartitionAllocSize[p]; } *partitionPtr[p]++ = v; } } for(p = 0; p < numPartitions; ++p) { prefixSize += partitionPtr[p] - partitions[p]; partitionPrefixSize[p] = prefixSize; if(prefixSize > numSrcVertices) { fprintf(stderr, "; Error: prefixSize = %d > numSrcVertices = %d\n", prefixSize, numSrcVertices); exit(EXIT_FAILURE); } memcpy((void *)vertexRank, partitions[p], 
sizeof(uint32_t) * (partitionPtr[p] - partitions[p])); vertexRank += partitionPtr[p] - partitions[p]; } /* free memory */ for(p = 0; p < numPartitions; ++p) { FREE(partitions[p]); FREE(partitionPtr); FREE(partitionEndPtr); FREE(pPartitionAllocSize); } FREE(partitions); } static HmlErrCode hmlTriangleCountCopyToGpu(HmlTriangleCountBase *cpu, HmlTriangleCountBase *gpu) { HML_ERR_PROLOGUE; /* shallow copy of HmlTriangleCount object from "cpu" to "gpu" */ memcpy(gpu, cpu, sizeof(HmlTriangleCount)); /* reset all pointer members of "gpu" object */ gpu->D = NULL; gpu->P = NULL; gpu->numTrianglesEachThread = NULL; /* alloc memory on GPU for graph core and copy the graph data */ hmlGraphCoreCopyToGpu(&cpu->core, &gpu->core); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountGpuInit(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; HmlGraphCore *core = &triangleCount->cpu.core; uint32_t minSrcVertex = core->minSrcVertex; uint32_t maxSrcVertex = core->maxSrcVertex; uint32_t numSrcVertices = maxSrcVertex - minSrcVertex + 1; uint32_t numVertices = max(maxSrcVertex, core->maxDestVertex) + 1; uint32_t *vertexRank; /* array of vertex ids sorted by out-degree */ uint32_t minOutDeg[cHmlTriangleCountMaxNumPartitions]; /* min out-deg of each partition */ uint32_t partitionPrefixSize[cHmlTriangleCountMaxNumPartitions]; /* cumulative size */ uint32_t vertexRankSize; size_t freeBytes; size_t totalBytes; double cpuStart, wallStart; double cpuEnd, wallEnd; /* get free gpu memory size */ if(triangleCount->verbosity >= 2) { HANDLE_ERROR(hipMemGetInfo(&freeBytes, &totalBytes)); fprintf(stderr, "; Info: GPU memory: %ld bytes free, %ld bytes total\n", freeBytes, totalBytes); } hmlGetSecs(&cpuStart, &wallStart); /* create GPU object */ hmlTriangleCountCopyToGpu(&triangleCount->cpu, &triangleCount->gpu); hipDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 2) { fprintf(stderr, "; Info: Load graph to device: wall time = %.2lf\n", (wallEnd - wallStart) * 1000); } if(numVertices > cHmlMaxCudaTexture1DLinear) { hmlPrintf("; Error: Number of vertices exceeds the maximum " "texture 1D size\n"); HML_ERR_GEN(true, cHmlErrGeneral); } hmlTriangleCountKernelSetup(triangleCount->kernel, minOutDeg, cHmlTriangleCountMaxNumKernels, &triangleCount->numPartitions, triangleCount->kernelArgs); /* create vertexRank mapping */ hmlGetSecs(&cpuStart, &wallStart); /* allocate vertexRank[] on CPU */ MALLOC(vertexRank, uint32_t, numSrcVertices); hmlTriangleCountPartitionVertexByOutDeg(&triangleCount->cpu.core, minOutDeg, triangleCount->numPartitions, vertexRank, partitionPrefixSize); //hmlGetSecs(&cpuEnd, &wallEnd); //fprintf(stderr, "; Info: Partition vertices on CPU: " // "cpu time = %.2lf, wall time = %.2lf\n", // (cpuEnd - cpuStart* 1000, (wallEnd - wallStart) * 1000); vertexRankSize = partitionPrefixSize[triangleCount->numPartitions - 1]; /* resize vertexRank */ REALLOC(vertexRank, uint32_t, vertexRankSize); /* allocate gpuVertexRank[] on device */ HANDLE_ERROR(hipMalloc(&triangleCount->gpuVertexRank, sizeof(uint32_t) * vertexRankSize)); /* copy vertexRank[] to gpuVertexRank[] */ HANDLE_ERROR(hipMemcpy(triangleCount->gpuVertexRank, vertexRank, sizeof(uint32_t) * vertexRankSize, hipMemcpyHostToDevice)); hipDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 2) { fprintf(stderr, "; Info: Partition and copy vertice ranks to device: " "wall time = %.2lf\n", (wallEnd - wallStart) * 1000); fprintf(stderr, "; Info: Number of pages with in-coming link: %d (%.2lf%%)\n", vertexRankSize, 
100 * vertexRankSize/(double)(numVertices)); fprintf(stderr, "; Info: Partitioned graph size = %.2lf MB\n", (core->maxNumSrcVertices + core->numEdges + vertexRankSize) * sizeof(uint32_t) / (double)(1024 * 1024)); } /* print vertex ranks for small graphs */ if(triangleCount->verbosity >= 3 && vertexRankSize <= 100) { for(uint32_t r = 0; r < vertexRankSize; ++r) { fprintf(stderr, "; Info: rank %3d = vertex %3d\n", r, vertexRank[r]); } } /* set the kernel arguments */ hmlTriangleCountKernelArgSet(triangleCount->kernelArgs, triangleCount->numPartitions, minOutDeg, partitionPrefixSize); /* print kernel params */ if(triangleCount->verbosity >= 2) { hmlTriangleCountKernelArgPrint(triangleCount->kernelArgs, triangleCount->numPartitions); } HANDLE_ERROR(hipMalloc(&triangleCount->gpuCountArr, sizeof(uint32_t) * (maxSrcVertex + 1))); HANDLE_ERROR(hipMemset(triangleCount->gpuCountArr, 0, sizeof(uint32_t) * (maxSrcVertex + 1))); HANDLE_ERROR(hipMalloc(&triangleCount->gpuBlockCountArr, sizeof(uint64_t) * cHmlTriangleCountSumBlocks)); HANDLE_ERROR(hipMemset(triangleCount->gpuBlockCountArr, 0, sizeof(uint64_t) * cHmlTriangleCountSumBlocks)); FREE(vertexRank); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountGpu(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; HmlGraphCore *gpuCore = &triangleCount->gpu.core; uint32_t maxSrcVertex = gpuCore->maxSrcVertex; uint32_t *gpuVertexRank = triangleCount->gpuVertexRank; uint32_t *gpuCountArr = triangleCount->gpuCountArr; uint64_t *gpuBlockCountArr = triangleCount->gpuBlockCountArr; uint32_t numPartitions = triangleCount->numPartitions; double cpuStart, wallStart; double cpuEnd, wallEnd; HmlTriangleCountKernel *kernel = triangleCount->kernel; HmlTriangleCountKernelArg *kernelArgs = triangleCount->kernelArgs; int blk; uint32_t *countArr; MALLOC(countArr, uint32_t, maxSrcVertex + 1); hmlGetSecs(&cpuStart, &wallStart); //fprintf(stderr, "; Info: iter = %d\n", iter); for(uint32_t p = 0; p < numPartitions; ++p) { kernel[kernelArgs[p]hipLaunchKernelGGL((.id)], dim3(kernelArgs[p].grid), dim3(kernelArgs[p].block), 0, 0, gpuCountArr, gpuCore->R, gpuCore->E, maxSrcVertex, gpuVertexRank, kernelArgs[p].minVertexRank, kernelArgs[p].maxVertexRank); } hipLaunchKernelGGL(( hmlTriangleCountSumKernel), dim3(cHmlTriangleCountSumBlocks), dim3(cHmlTriangleCountSumThreadsPerBlock), 0, 0, gpuBlockCountArr, gpuCountArr, maxSrcVertex); hipDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 1) { fprintf(stderr, "; Info: GPU TriangleCount: wall time = %.2lf\n", (wallEnd - wallStart) * 1000); } HANDLE_ERROR(hipMemcpy(triangleCount->blockCountArr, gpuBlockCountArr, sizeof(uint64_t) * cHmlTriangleCountSumBlocks, hipMemcpyDeviceToHost)); triangleCount->gpu.numTriangles = 0; for(blk = 0; blk < cHmlTriangleCountSumBlocks; blk++) { triangleCount->gpu.numTriangles += triangleCount->blockCountArr[blk]; } /* HANDLE_ERROR(hipMemcpy(countArr, gpuCountArr, sizeof(uint32_t) * (maxSrcVertex + 1), hipMemcpyDeviceToHost)); triangleCount->gpu.numTriangles = 0; for (blk = 0; blk <= maxSrcVertex; blk++) { triangleCount->gpu.numTriangles += countArr[blk]; } */ FREE(countArr); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountBaseInit(HmlTriangleCountBase *count, uint32_t numThreads) { HML_ERR_PROLOGUE; memset(count, 0, sizeof(HmlTriangleCount)); HML_ERR_PASS(hmlVertexPartitionInit(&count->partition)); count->numThreads = numThreads; if(numThreads > 1) { CALLOC(count->numTrianglesEachThread, uint64_t, numThreads); } HML_NORMAL_RETURN; } HmlErrCode 
hmlTriangleCountBaseDelete(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlGraphCore *core = &count->core; if(count->numThreads > 1) { FREE(count->numTrianglesEachThread); } FREE(count->D); FREE(count->P); HML_ERR_PASS(hmlGraphCoreDelete(core)); HML_ERR_PASS(hmlVertexPartitionDelete(&count->partition)); memset(count, 0, sizeof(HmlTriangleCount)); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountInit(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; memset(triangleCount, 0, sizeof(HmlTriangleCount)); HML_ERR_PASS(hmlTriangleCountBaseInit(&triangleCount->cpu, 1)); MALLOC(triangleCount->blockCountArr, uint64_t, cHmlTriangleCountSumBlocks); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountDelete(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; FREE(triangleCount->blockCountArr); HML_ERR_PASS(hmlTriangleCountBaseDelete(&triangleCount->cpu)); /* free GPU stuff */ HANDLE_ERROR(hipFree(triangleCount->gpuCountArr)); HANDLE_ERROR(hipFree(triangleCount->gpuBlockCountArr)); if(triangleCount->gpu.core.numEdges > 0) { hmlGraphCoreGpuDelete(&triangleCount->gpu.core); } HANDLE_ERROR(hipFree(triangleCount->gpuVertexRank)); memset(triangleCount, 0, sizeof(HmlTriangleCount)); HML_NORMAL_RETURN; } /* only allows edge (u, v), iff u < v */ static HmlErrCode hmlTriangleCountGraphAppender(HmlGraphCore *core, void *appendState, uint32_t srcVertex, uint32_t destVertex) { HML_ERR_PROLOGUE; HmlGraphCoreAppendState *appState = (HmlGraphCoreAppendState *)appendState; if(srcVertex < destVertex) { HML_ERR_PASS(hmlGraphCoreAppend(core, appState, srcVertex, destVertex)); } HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountReadOrderedTsv2File(HmlTriangleCountBase *count, FILE *file, bool srcVertexOnRightColumn) { HML_ERR_PROLOGUE; HmlGraphCoreAppendState state; hmlGraphCoreAppendFromTsv2File(&count->core, file, srcVertexOnRightColumn, (HmlGraphCoreAppendIniter)hmlGraphCoreAppendInit, (HmlGraphCoreAppender)hmlTriangleCountGraphAppender, (HmlGraphCoreAppendFinalizer)hmlGraphCoreAppendFinal, &state); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountReadOrderedTsv2FileByName(HmlTriangleCountBase *count, char const *fileName, bool srcVertexOnRightColumn) { HML_ERR_PROLOGUE; FILE *file; HML_ERR_PASS(hmlFileOpenRead(fileName, &file)); HML_ERR_PASS(hmlTriangleCountReadOrderedTsv2File(count, file, srcVertexOnRightColumn)); HML_ERR_PASS(hmlFileClose(file)); HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountGraphInserter(HmlGraphCore *core, void *insertState, uint32_t srcVertex, uint32_t destVertex) { HML_ERR_PROLOGUE; HmlTriangleCountGraphInsertState *state = (HmlTriangleCountGraphInsertState *)insertState; uint32_t tmpVertex; if(srcVertex != destVertex) { srcVertex = state->P[srcVertex]; destVertex = state->P[destVertex]; if(srcVertex > destVertex) { tmpVertex = srcVertex; srcVertex = destVertex; destVertex = tmpVertex; } HML_ERR_PASS(hmlGraphCoreAddEdge(core, srcVertex, destVertex)); } HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountSetPartition(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; uint64_t maxNumEdgesPerPartition = (count->core.numEdges + count->numThreads - 1) / count->numThreads; maxNumEdgesPerPartition += count->core.maxOutDegree; HML_ERR_PASS(hmlGraphCoreVertexPartition(&count->core, maxNumEdgesPerPartition, &count->partition)); HML_NORMAL_RETURN; } static int HML_QSORT_COMPARE_FUNC(hmlTriangleCountVertexCompare, a, b, arg) { uint32_t *D = (uint32_t *)arg; return D[*(const uint32_t *)b] - D[*(const uint32_t *)a]; } /* reorder the edges of an undirected graph such that * the vertices with more 
neighbors are stored * as source vertices; whereas those with fewer * neighbors are stored in the adjacency list of * the high-degree vertices. * that is, if (u, v) in E and deg(u) > deg(v), then * v is stored as a successor of u but NOT the other * way around in a succinct encoding of undirected graph */ HmlErrCode hmlTriangleCountReorderEdges(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlTriangleCountGraphInsertState insertState; HmlGraphCore *core = &count->core; HmlGraphCore copyVal; HmlGraphCore *copy = &copyVal; uint32_t numVertices = max(core->maxSrcVertex, core->maxDestVertex) + 1; uint32_t *D; uint32_t v; HML_ERR_GEN(count->D, cHmlErrGeneral); CALLOC(count->D, uint32_t, numVertices); /* use the just allocated count->D to store out-degree information */ HML_ERR_PASS(hmlGraphCoreCountBidirectDegree(core, count->D, numVertices)); /* create vertex permutation array: count->P */ HML_ERR_GEN(count->P, cHmlErrGeneral); MALLOC(count->P, uint32_t, numVertices); for(v = 0; v < numVertices; v++) { count->P[v] = v; } HML_QSORT(count->P, numVertices, sizeof(uint32_t), hmlTriangleCountVertexCompare, count->D); CALLOC(D, uint32_t, numVertices); /* store the "adjusted" degree in 'D' and use it as * the 'D' argument to call hmlGraphCoreSetR(core, D, ...) */ HML_ERR_PASS(hmlGraphCoreCountDegreeIfSmallerP(core, count->P, D, numVertices)); /* make a shallow copy so that we can still free the memory allocated * for R and E in the original 'core' */ memcpy(copy, core, sizeof(HmlGraphCore)); /* since we've got a copy of 'core', we can (re)initialize 'core' * as if it never existed before */ HML_ERR_PASS(hmlGraphCoreInit(core, numVertices, copy->numEdges)); HML_ERR_PASS(hmlGraphCoreSetR(core, D, 0, numVertices - 1)); insertState.P = count->P; HML_ERR_PASS(hmlGraphCoreInsertFromSrc(core, copy, hmlGraphCoreDefaultInsertIniter, hmlTriangleCountGraphInserter, NULL, &insertState)); if(count->numThreads > 1 && count->numThreads != count->partition.numPartitions) { HML_ERR_PASS(hmlTriangleCountSetPartition(count)); HML_ERR_PASS(hmlGraphCoreSortEdgesByPartition(core, &count->partition)); } else { HML_ERR_PASS(hmlGraphCoreSortEdges(core, 1)); } /* free R and E in the old 'core' now that the new one has been created */ HML_ERR_PASS(hmlGraphCoreDelete(copy)); FREE(D); HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountBySearch(HmlGraphCore *core, uint32_t thread, uint32_t minVertex, uint32_t maxVertex, void *args) { HML_ERR_PROLOGUE; HmlTriangleCountBase *count = (HmlTriangleCountBase *)args; uint32_t *R = core->R; uint32_t *E = core->E; uint32_t *eU; uint32_t *eU2; uint32_t *eV; uint32_t *endU; uint32_t *endV; uint32_t u; uint32_t v; uint32_t w; int32_t lowV; int32_t midV; int32_t highV; uint64_t numTriangles = 0; minVertex = max(minVertex, core->minSrcVertex); maxVertex = min(maxVertex, core->maxSrcVertex); for(u = minVertex; u <= maxVertex; u++) { endU = &E[R[u + 1]] - 1; for(eU = &E[R[u]]; eU < endU; eU++) { v = *eU; /* due to lexicographic edge pruning, v may be > maxSrcVertex */ if(v <= core->maxSrcVertex) { eV = &E[R[v]]; endV = &E[R[v + 1]] - 1; if(endV - eV <= cHmlTriangleCountLinearSearchMaxEdges) { for(eU2 = eU + 1; eU2 <= endU; eU2++) { w = *eU2; while(eV <= endV && *eV < w) { eV++; } if(eV > endV) { break; } if(*eV == w) { numTriangles++; } } } else { /* use binary search */ for(eU2 = eU + 1; eU2 <= endU; eU2++) { w = *eU2; highV = (uint32_t)(endV - eV); if(highV <= cHmlTriangleCountLinearSearchMaxEdges) { while(eV <= endV && *eV < w) { eV++; } if(eV > endV) { break; } if(*eV == w) { 
numTriangles++; } } else { lowV = 0; while(lowV <= highV) { /* to avoid overflow in (lowV + highV) / 2 */ midV = lowV + (highV - lowV) / 2; if(eV[midV] == w) { lowV = midV + 1; numTriangles++; break; } else if(eV[midV] < w) { lowV = midV + 1; } else { highV = midV - 1; } } eV += lowV; if(eV > endV) { break; } } } } } } } count->numTrianglesEachThread[thread] = numTriangles; HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountByHash(HmlGraphCore *core, uint32_t thread, uint32_t minVertex, uint32_t maxVertex, void *args) { HML_ERR_PROLOGUE; HmlTriangleCountBase *count = (HmlTriangleCountBase *)args; uint32_t *R = core->R; uint32_t *E = core->E; uint32_t eU; uint32_t eU2; uint32_t eV; uint32_t endU; uint32_t u; uint32_t v; uint64_t numTriangles = 0; uint32_t *edgeId0; uint32_t *edgeId; uint32_t numDestVertices = core->maxDestVertex - core->minDestVertex + 1; CALLOC(edgeId0, uint32_t, numDestVertices); /* initialize edgeId0[] with (uint32_t)-1, an invalid edge id */ memset(edgeId0, 0xFF, sizeof(uint32_t) * numDestVertices); edgeId = edgeId0 - core->minDestVertex; minVertex = max(minVertex, core->minSrcVertex); maxVertex = min(maxVertex, core->maxSrcVertex); for(u = minVertex; u <= maxVertex; u++) { endU = R[u + 1] - 1; for(eU = R[u]; eU < endU; eU++) { v = E[eU]; /* due to lexicographic edge pruning, v may be > maxSrcVertex */ if(v <= core->maxSrcVertex) { for(eV = R[v]; eV < R[v + 1]; eV++) { edgeId[E[eV]] = eU; } for(eU2 = eU + 1; eU2 <= endU; eU2++) { if(edgeId[E[eU2]] == eU) { numTriangles++; } } } } } count->numTrianglesEachThread[thread] = numTriangles; FREE(edgeId0); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountRun(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlGraphCore *core = &count->core; uint32_t thread; HmlGraphCoreParaFunc func = (count->countByHash) ? hmlTriangleCountByHash : hmlTriangleCountBySearch; if(count->numThreads > 1) { if(count->numThreads != count->partition.numPartitions) { HML_ERR_PASS(hmlTriangleCountSetPartition(count)); } HML_ERR_PASS(hmlGraphCoreRunParaFuncByPartition(core, func, count, &count->partition)); count->numTriangles = 0; for(thread = 0; thread < count->numThreads; thread++) { count->numTriangles += count->numTrianglesEachThread[thread]; } } else { count->numTrianglesEachThread = &count->numTriangles; HML_ERR_PASS(func(&count->core, 0, count->core.minSrcVertex, count->core.maxSrcVertex, count)); count->numTrianglesEachThread = NULL; /* count->numTriangles = count->numTrianglesEachThread[0]; */ } HML_NORMAL_RETURN; }
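// --- editor's illustrative sketch (not part of the original sources) ----------------
// In hmlTriangleCountGpu above, hipify has mangled the launch made through the
// kernel[] function-pointer array ("kernel[kernelArgs[p]hipLaunchKernelGGL((.id)], ...").
// A hedged sketch of the intended form of that call, assuming HmlTriangleCountKernel is
// a __global__ function-pointer type as in the .cu version that follows:
//
//   hipLaunchKernelGGL(kernel[kernelArgs[p].id],
//                      dim3(kernelArgs[p].grid), dim3(kernelArgs[p].block), 0, 0,
//                      gpuCountArr, gpuCore->R, gpuCore->E, maxSrcVertex,
//                      gpuVertexRank,
//                      kernelArgs[p].minVertexRank, kernelArgs[p].maxVertexRank);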
7aa318ac6375e5e5ae1211d74f049d9f1bb0c97a.cu
/************************************************************* * Copyright (c) 2017, Palo Alto Research Center. * * All rights reserved. * *************************************************************/ #include "hml_quick_sort.h" #include "hml_triangle_count.h" #include "hml_file_utils.h" #define cHmlBlocksPerGrid 128 #define cHmlThreadsPerBlock 128 #define cHmlTriangleCountPartitionSizeInit 1024 #define cHmlTriangleCountMaxNumPartitions 32 #define cHmlTriangleCountLinearSearchMaxEdges 4096 #define cHmlTriangleCountNumBlocks 256 typedef struct { uint32_t *P; /* vertex permutation array */ } HmlTriangleCountGraphInsertState; /* partition 'graph' into 'numPartitions' s.t. for all vertices * v in partition p, the following property holds: * minOutDeg[p] <= out-degree(v) < minOutDeg[p + 1], * except for the last partition p = numParititions - 1, for which * it holds: * minOutDeg[numPartitions - 1] <= out-degree(v) < infinity * Thus, minOutDeg[] has 'numPartitions' elements. * The output is stored in vertexRank, an array of size * (graph->maxSrcVertex - graph->minSrcVertex + 1) elements. * vertexRank[] must be allocated by the caller of this function. * vertexRank[r] stores the id of vertex v in partition p s.t. * vertexRank[r] == v && partitionPrefixSize[p - 1] <= r && * r < partitionPrefixSize[p], except for the first partition p = 0, where * 0 <= r < partitionPrefixSize[0] * The actual size of vertexRank is given by: * partitionPrefixSize[numPartitions - 1], which should never exceed * numSrcVertices (see below). It's the caller's responsibility to * resize vertexRank afterwards to free its unused portion. */ void hmlTriangleCountPartitionVertexByOutDeg(HmlGraphCore *core, uint32_t *minOutDeg, uint32_t numPartitions, uint32_t *vertexRank, uint32_t *partitionPrefixSize) { uint32_t **partitions; uint32_t p; uint32_t v; uint32_t outDeg; uint32_t *R = core->R; uint32_t *pPartitionAllocSize; /* allocation size */ uint32_t **partitionPtr; uint32_t **partitionEndPtr; /* actual used size */ uint32_t numSrcVertices = core->maxSrcVertex - core->minSrcVertex + 1; uint32_t prefixSize = 0; MALLOC(partitions, uint32_t *, numPartitions); MALLOC(partitionPtr, uint32_t *, numPartitions); MALLOC(partitionEndPtr, uint32_t *, numPartitions); MALLOC(pPartitionAllocSize, uint32_t, numPartitions); for(p = 0; p < numPartitions; ++p) { MALLOC(partitions[p], uint32_t, cHmlTriangleCountPartitionSizeInit); pPartitionAllocSize[p] = cHmlTriangleCountPartitionSizeInit; partitionPtr[p] = partitions[p]; partitionEndPtr[p] = partitions[p] + cHmlTriangleCountPartitionSizeInit; } for(v = core->minSrcVertex; v <= core->maxSrcVertex; ++v) { outDeg = R[v + 1] - R[v]; /* each page takes one 32-bit word */ /* use linear scan to find which partition this vertex belongs to */ for(p = 0; p < numPartitions && minOutDeg[p] <= outDeg; ++p); if(p > 0) { --p; if(partitionPtr[p] == partitionEndPtr[p]) { REALLOC(partitions[p], uint32_t, pPartitionAllocSize[p] * 2); partitionPtr[p] = partitions[p] + pPartitionAllocSize[p]; pPartitionAllocSize[p] *= 2; partitionEndPtr[p] = partitions[p] + pPartitionAllocSize[p]; } *partitionPtr[p]++ = v; } } for(p = 0; p < numPartitions; ++p) { prefixSize += partitionPtr[p] - partitions[p]; partitionPrefixSize[p] = prefixSize; if(prefixSize > numSrcVertices) { fprintf(stderr, "; Error: prefixSize = %d > numSrcVertices = %d\n", prefixSize, numSrcVertices); exit(EXIT_FAILURE); } memcpy((void *)vertexRank, partitions[p], sizeof(uint32_t) * (partitionPtr[p] - partitions[p])); vertexRank += partitionPtr[p] - 
partitions[p]; } /* free memory */ for(p = 0; p < numPartitions; ++p) { FREE(partitions[p]); FREE(partitionPtr); FREE(partitionEndPtr); FREE(pPartitionAllocSize); } FREE(partitions); } static HmlErrCode hmlTriangleCountCopyToGpu(HmlTriangleCountBase *cpu, HmlTriangleCountBase *gpu) { HML_ERR_PROLOGUE; /* shallow copy of HmlTriangleCount object from "cpu" to "gpu" */ memcpy(gpu, cpu, sizeof(HmlTriangleCount)); /* reset all pointer members of "gpu" object */ gpu->D = NULL; gpu->P = NULL; gpu->numTrianglesEachThread = NULL; /* alloc memory on GPU for graph core and copy the graph data */ hmlGraphCoreCopyToGpu(&cpu->core, &gpu->core); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountGpuInit(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; HmlGraphCore *core = &triangleCount->cpu.core; uint32_t minSrcVertex = core->minSrcVertex; uint32_t maxSrcVertex = core->maxSrcVertex; uint32_t numSrcVertices = maxSrcVertex - minSrcVertex + 1; uint32_t numVertices = max(maxSrcVertex, core->maxDestVertex) + 1; uint32_t *vertexRank; /* array of vertex ids sorted by out-degree */ uint32_t minOutDeg[cHmlTriangleCountMaxNumPartitions]; /* min out-deg of each partition */ uint32_t partitionPrefixSize[cHmlTriangleCountMaxNumPartitions]; /* cumulative size */ uint32_t vertexRankSize; size_t freeBytes; size_t totalBytes; double cpuStart, wallStart; double cpuEnd, wallEnd; /* get free gpu memory size */ if(triangleCount->verbosity >= 2) { HANDLE_ERROR(cudaMemGetInfo(&freeBytes, &totalBytes)); fprintf(stderr, "; Info: GPU memory: %ld bytes free, %ld bytes total\n", freeBytes, totalBytes); } hmlGetSecs(&cpuStart, &wallStart); /* create GPU object */ hmlTriangleCountCopyToGpu(&triangleCount->cpu, &triangleCount->gpu); cudaDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 2) { fprintf(stderr, "; Info: Load graph to device: wall time = %.2lf\n", (wallEnd - wallStart) * 1000); } if(numVertices > cHmlMaxCudaTexture1DLinear) { hmlPrintf("; Error: Number of vertices exceeds the maximum " "texture 1D size\n"); HML_ERR_GEN(true, cHmlErrGeneral); } hmlTriangleCountKernelSetup(triangleCount->kernel, minOutDeg, cHmlTriangleCountMaxNumKernels, &triangleCount->numPartitions, triangleCount->kernelArgs); /* create vertexRank mapping */ hmlGetSecs(&cpuStart, &wallStart); /* allocate vertexRank[] on CPU */ MALLOC(vertexRank, uint32_t, numSrcVertices); hmlTriangleCountPartitionVertexByOutDeg(&triangleCount->cpu.core, minOutDeg, triangleCount->numPartitions, vertexRank, partitionPrefixSize); //hmlGetSecs(&cpuEnd, &wallEnd); //fprintf(stderr, "; Info: Partition vertices on CPU: " // "cpu time = %.2lf, wall time = %.2lf\n", // (cpuEnd - cpuStart* 1000, (wallEnd - wallStart) * 1000); vertexRankSize = partitionPrefixSize[triangleCount->numPartitions - 1]; /* resize vertexRank */ REALLOC(vertexRank, uint32_t, vertexRankSize); /* allocate gpuVertexRank[] on device */ HANDLE_ERROR(cudaMalloc(&triangleCount->gpuVertexRank, sizeof(uint32_t) * vertexRankSize)); /* copy vertexRank[] to gpuVertexRank[] */ HANDLE_ERROR(cudaMemcpy(triangleCount->gpuVertexRank, vertexRank, sizeof(uint32_t) * vertexRankSize, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 2) { fprintf(stderr, "; Info: Partition and copy vertice ranks to device: " "wall time = %.2lf\n", (wallEnd - wallStart) * 1000); fprintf(stderr, "; Info: Number of pages with in-coming link: %d (%.2lf%%)\n", vertexRankSize, 100 * vertexRankSize/(double)(numVertices)); fprintf(stderr, "; Info: Partitioned 
graph size = %.2lf MB\n", (core->maxNumSrcVertices + core->numEdges + vertexRankSize) * sizeof(uint32_t) / (double)(1024 * 1024)); } /* print vertex ranks for small graphs */ if(triangleCount->verbosity >= 3 && vertexRankSize <= 100) { for(uint32_t r = 0; r < vertexRankSize; ++r) { fprintf(stderr, "; Info: rank %3d = vertex %3d\n", r, vertexRank[r]); } } /* set the kernel arguments */ hmlTriangleCountKernelArgSet(triangleCount->kernelArgs, triangleCount->numPartitions, minOutDeg, partitionPrefixSize); /* print kernel params */ if(triangleCount->verbosity >= 2) { hmlTriangleCountKernelArgPrint(triangleCount->kernelArgs, triangleCount->numPartitions); } HANDLE_ERROR(cudaMalloc(&triangleCount->gpuCountArr, sizeof(uint32_t) * (maxSrcVertex + 1))); HANDLE_ERROR(cudaMemset(triangleCount->gpuCountArr, 0, sizeof(uint32_t) * (maxSrcVertex + 1))); HANDLE_ERROR(cudaMalloc(&triangleCount->gpuBlockCountArr, sizeof(uint64_t) * cHmlTriangleCountSumBlocks)); HANDLE_ERROR(cudaMemset(triangleCount->gpuBlockCountArr, 0, sizeof(uint64_t) * cHmlTriangleCountSumBlocks)); FREE(vertexRank); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountGpu(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; HmlGraphCore *gpuCore = &triangleCount->gpu.core; uint32_t maxSrcVertex = gpuCore->maxSrcVertex; uint32_t *gpuVertexRank = triangleCount->gpuVertexRank; uint32_t *gpuCountArr = triangleCount->gpuCountArr; uint64_t *gpuBlockCountArr = triangleCount->gpuBlockCountArr; uint32_t numPartitions = triangleCount->numPartitions; double cpuStart, wallStart; double cpuEnd, wallEnd; HmlTriangleCountKernel *kernel = triangleCount->kernel; HmlTriangleCountKernelArg *kernelArgs = triangleCount->kernelArgs; int blk; uint32_t *countArr; MALLOC(countArr, uint32_t, maxSrcVertex + 1); hmlGetSecs(&cpuStart, &wallStart); //fprintf(stderr, "; Info: iter = %d\n", iter); for(uint32_t p = 0; p < numPartitions; ++p) { kernel[kernelArgs[p].id]<<<kernelArgs[p].grid, kernelArgs[p].block>>> (gpuCountArr, gpuCore->R, gpuCore->E, maxSrcVertex, gpuVertexRank, kernelArgs[p].minVertexRank, kernelArgs[p].maxVertexRank); } hmlTriangleCountSumKernel<<<cHmlTriangleCountSumBlocks, cHmlTriangleCountSumThreadsPerBlock>>> (gpuBlockCountArr, gpuCountArr, maxSrcVertex); cudaDeviceSynchronize(); hmlGetSecs(&cpuEnd, &wallEnd); if(triangleCount->verbosity >= 1) { fprintf(stderr, "; Info: GPU TriangleCount: wall time = %.2lf\n", (wallEnd - wallStart) * 1000); } HANDLE_ERROR(cudaMemcpy(triangleCount->blockCountArr, gpuBlockCountArr, sizeof(uint64_t) * cHmlTriangleCountSumBlocks, cudaMemcpyDeviceToHost)); triangleCount->gpu.numTriangles = 0; for(blk = 0; blk < cHmlTriangleCountSumBlocks; blk++) { triangleCount->gpu.numTriangles += triangleCount->blockCountArr[blk]; } /* HANDLE_ERROR(cudaMemcpy(countArr, gpuCountArr, sizeof(uint32_t) * (maxSrcVertex + 1), cudaMemcpyDeviceToHost)); triangleCount->gpu.numTriangles = 0; for (blk = 0; blk <= maxSrcVertex; blk++) { triangleCount->gpu.numTriangles += countArr[blk]; } */ FREE(countArr); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountBaseInit(HmlTriangleCountBase *count, uint32_t numThreads) { HML_ERR_PROLOGUE; memset(count, 0, sizeof(HmlTriangleCount)); HML_ERR_PASS(hmlVertexPartitionInit(&count->partition)); count->numThreads = numThreads; if(numThreads > 1) { CALLOC(count->numTrianglesEachThread, uint64_t, numThreads); } HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountBaseDelete(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlGraphCore *core = &count->core; if(count->numThreads > 1) { FREE(count->numTrianglesEachThread); } 
FREE(count->D); FREE(count->P); HML_ERR_PASS(hmlGraphCoreDelete(core)); HML_ERR_PASS(hmlVertexPartitionDelete(&count->partition)); memset(count, 0, sizeof(HmlTriangleCount)); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountInit(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; memset(triangleCount, 0, sizeof(HmlTriangleCount)); HML_ERR_PASS(hmlTriangleCountBaseInit(&triangleCount->cpu, 1)); MALLOC(triangleCount->blockCountArr, uint64_t, cHmlTriangleCountSumBlocks); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountDelete(HmlTriangleCount *triangleCount) { HML_ERR_PROLOGUE; FREE(triangleCount->blockCountArr); HML_ERR_PASS(hmlTriangleCountBaseDelete(&triangleCount->cpu)); /* free GPU stuff */ HANDLE_ERROR(cudaFree(triangleCount->gpuCountArr)); HANDLE_ERROR(cudaFree(triangleCount->gpuBlockCountArr)); if(triangleCount->gpu.core.numEdges > 0) { hmlGraphCoreGpuDelete(&triangleCount->gpu.core); } HANDLE_ERROR(cudaFree(triangleCount->gpuVertexRank)); memset(triangleCount, 0, sizeof(HmlTriangleCount)); HML_NORMAL_RETURN; } /* only allows edge (u, v), iff u < v */ static HmlErrCode hmlTriangleCountGraphAppender(HmlGraphCore *core, void *appendState, uint32_t srcVertex, uint32_t destVertex) { HML_ERR_PROLOGUE; HmlGraphCoreAppendState *appState = (HmlGraphCoreAppendState *)appendState; if(srcVertex < destVertex) { HML_ERR_PASS(hmlGraphCoreAppend(core, appState, srcVertex, destVertex)); } HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountReadOrderedTsv2File(HmlTriangleCountBase *count, FILE *file, bool srcVertexOnRightColumn) { HML_ERR_PROLOGUE; HmlGraphCoreAppendState state; hmlGraphCoreAppendFromTsv2File(&count->core, file, srcVertexOnRightColumn, (HmlGraphCoreAppendIniter)hmlGraphCoreAppendInit, (HmlGraphCoreAppender)hmlTriangleCountGraphAppender, (HmlGraphCoreAppendFinalizer)hmlGraphCoreAppendFinal, &state); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountReadOrderedTsv2FileByName(HmlTriangleCountBase *count, char const *fileName, bool srcVertexOnRightColumn) { HML_ERR_PROLOGUE; FILE *file; HML_ERR_PASS(hmlFileOpenRead(fileName, &file)); HML_ERR_PASS(hmlTriangleCountReadOrderedTsv2File(count, file, srcVertexOnRightColumn)); HML_ERR_PASS(hmlFileClose(file)); HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountGraphInserter(HmlGraphCore *core, void *insertState, uint32_t srcVertex, uint32_t destVertex) { HML_ERR_PROLOGUE; HmlTriangleCountGraphInsertState *state = (HmlTriangleCountGraphInsertState *)insertState; uint32_t tmpVertex; if(srcVertex != destVertex) { srcVertex = state->P[srcVertex]; destVertex = state->P[destVertex]; if(srcVertex > destVertex) { tmpVertex = srcVertex; srcVertex = destVertex; destVertex = tmpVertex; } HML_ERR_PASS(hmlGraphCoreAddEdge(core, srcVertex, destVertex)); } HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountSetPartition(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; uint64_t maxNumEdgesPerPartition = (count->core.numEdges + count->numThreads - 1) / count->numThreads; maxNumEdgesPerPartition += count->core.maxOutDegree; HML_ERR_PASS(hmlGraphCoreVertexPartition(&count->core, maxNumEdgesPerPartition, &count->partition)); HML_NORMAL_RETURN; } static int HML_QSORT_COMPARE_FUNC(hmlTriangleCountVertexCompare, a, b, arg) { uint32_t *D = (uint32_t *)arg; return D[*(const uint32_t *)b] - D[*(const uint32_t *)a]; } /* reorder the edges of an undirected graph such that * the vertices with more neighbors are stored * as source vertices; whereas those with fewer * neighbors are stored in the adjacency list of * the high-degree vertices. 
* that is, if (u, v) in E and deg(u) > deg(v), then * v is stored as a successor of u but NOT the other * way around in a succinct encoding of undirected graph */ HmlErrCode hmlTriangleCountReorderEdges(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlTriangleCountGraphInsertState insertState; HmlGraphCore *core = &count->core; HmlGraphCore copyVal; HmlGraphCore *copy = &copyVal; uint32_t numVertices = max(core->maxSrcVertex, core->maxDestVertex) + 1; uint32_t *D; uint32_t v; HML_ERR_GEN(count->D, cHmlErrGeneral); CALLOC(count->D, uint32_t, numVertices); /* use the just allocated count->D to store out-degree information */ HML_ERR_PASS(hmlGraphCoreCountBidirectDegree(core, count->D, numVertices)); /* create vertex permutation array: count->P */ HML_ERR_GEN(count->P, cHmlErrGeneral); MALLOC(count->P, uint32_t, numVertices); for(v = 0; v < numVertices; v++) { count->P[v] = v; } HML_QSORT(count->P, numVertices, sizeof(uint32_t), hmlTriangleCountVertexCompare, count->D); CALLOC(D, uint32_t, numVertices); /* store the "adjusted" degree in 'D' and use it as * the 'D' argument to call hmlGraphCoreSetR(core, D, ...) */ HML_ERR_PASS(hmlGraphCoreCountDegreeIfSmallerP(core, count->P, D, numVertices)); /* make a shallow copy so that we can still free the memory allocated * for R and E in the original 'core' */ memcpy(copy, core, sizeof(HmlGraphCore)); /* since we've got a copy of 'core', we can (re)initialize 'core' * as if it never existed before */ HML_ERR_PASS(hmlGraphCoreInit(core, numVertices, copy->numEdges)); HML_ERR_PASS(hmlGraphCoreSetR(core, D, 0, numVertices - 1)); insertState.P = count->P; HML_ERR_PASS(hmlGraphCoreInsertFromSrc(core, copy, hmlGraphCoreDefaultInsertIniter, hmlTriangleCountGraphInserter, NULL, &insertState)); if(count->numThreads > 1 && count->numThreads != count->partition.numPartitions) { HML_ERR_PASS(hmlTriangleCountSetPartition(count)); HML_ERR_PASS(hmlGraphCoreSortEdgesByPartition(core, &count->partition)); } else { HML_ERR_PASS(hmlGraphCoreSortEdges(core, 1)); } /* free R and E in the old 'core' now that the new one has been created */ HML_ERR_PASS(hmlGraphCoreDelete(copy)); FREE(D); HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountBySearch(HmlGraphCore *core, uint32_t thread, uint32_t minVertex, uint32_t maxVertex, void *args) { HML_ERR_PROLOGUE; HmlTriangleCountBase *count = (HmlTriangleCountBase *)args; uint32_t *R = core->R; uint32_t *E = core->E; uint32_t *eU; uint32_t *eU2; uint32_t *eV; uint32_t *endU; uint32_t *endV; uint32_t u; uint32_t v; uint32_t w; int32_t lowV; int32_t midV; int32_t highV; uint64_t numTriangles = 0; minVertex = max(minVertex, core->minSrcVertex); maxVertex = min(maxVertex, core->maxSrcVertex); for(u = minVertex; u <= maxVertex; u++) { endU = &E[R[u + 1]] - 1; for(eU = &E[R[u]]; eU < endU; eU++) { v = *eU; /* due to lexicographic edge pruning, v may be > maxSrcVertex */ if(v <= core->maxSrcVertex) { eV = &E[R[v]]; endV = &E[R[v + 1]] - 1; if(endV - eV <= cHmlTriangleCountLinearSearchMaxEdges) { for(eU2 = eU + 1; eU2 <= endU; eU2++) { w = *eU2; while(eV <= endV && *eV < w) { eV++; } if(eV > endV) { break; } if(*eV == w) { numTriangles++; } } } else { /* use binary search */ for(eU2 = eU + 1; eU2 <= endU; eU2++) { w = *eU2; highV = (uint32_t)(endV - eV); if(highV <= cHmlTriangleCountLinearSearchMaxEdges) { while(eV <= endV && *eV < w) { eV++; } if(eV > endV) { break; } if(*eV == w) { numTriangles++; } } else { lowV = 0; while(lowV <= highV) { /* to avoid overflow in (lowV + highV) / 2 */ midV = lowV + (highV - lowV) / 2; if(eV[midV] 
== w) { lowV = midV + 1; numTriangles++; break; } else if(eV[midV] < w) { lowV = midV + 1; } else { highV = midV - 1; } } eV += lowV; if(eV > endV) { break; } } } } } } } count->numTrianglesEachThread[thread] = numTriangles; HML_NORMAL_RETURN; } static HmlErrCode hmlTriangleCountByHash(HmlGraphCore *core, uint32_t thread, uint32_t minVertex, uint32_t maxVertex, void *args) { HML_ERR_PROLOGUE; HmlTriangleCountBase *count = (HmlTriangleCountBase *)args; uint32_t *R = core->R; uint32_t *E = core->E; uint32_t eU; uint32_t eU2; uint32_t eV; uint32_t endU; uint32_t u; uint32_t v; uint64_t numTriangles = 0; uint32_t *edgeId0; uint32_t *edgeId; uint32_t numDestVertices = core->maxDestVertex - core->minDestVertex + 1; CALLOC(edgeId0, uint32_t, numDestVertices); /* initialize edgeId0[] with (uint32_t)-1, an invalid edge id */ memset(edgeId0, 0xFF, sizeof(uint32_t) * numDestVertices); edgeId = edgeId0 - core->minDestVertex; minVertex = max(minVertex, core->minSrcVertex); maxVertex = min(maxVertex, core->maxSrcVertex); for(u = minVertex; u <= maxVertex; u++) { endU = R[u + 1] - 1; for(eU = R[u]; eU < endU; eU++) { v = E[eU]; /* due to lexicographic edge pruning, v may be > maxSrcVertex */ if(v <= core->maxSrcVertex) { for(eV = R[v]; eV < R[v + 1]; eV++) { edgeId[E[eV]] = eU; } for(eU2 = eU + 1; eU2 <= endU; eU2++) { if(edgeId[E[eU2]] == eU) { numTriangles++; } } } } } count->numTrianglesEachThread[thread] = numTriangles; FREE(edgeId0); HML_NORMAL_RETURN; } HmlErrCode hmlTriangleCountRun(HmlTriangleCountBase *count) { HML_ERR_PROLOGUE; HmlGraphCore *core = &count->core; uint32_t thread; HmlGraphCoreParaFunc func = (count->countByHash) ? hmlTriangleCountByHash : hmlTriangleCountBySearch; if(count->numThreads > 1) { if(count->numThreads != count->partition.numPartitions) { HML_ERR_PASS(hmlTriangleCountSetPartition(count)); } HML_ERR_PASS(hmlGraphCoreRunParaFuncByPartition(core, func, count, &count->partition)); count->numTriangles = 0; for(thread = 0; thread < count->numThreads; thread++) { count->numTriangles += count->numTrianglesEachThread[thread]; } } else { count->numTrianglesEachThread = &count->numTriangles; HML_ERR_PASS(func(&count->core, 0, count->core.minSrcVertex, count->core.maxSrcVertex, count)); count->numTrianglesEachThread = NULL; /* count->numTriangles = count->numTrianglesEachThread[0]; */ } HML_NORMAL_RETURN; }
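// --- editor's illustrative sketch (not part of the original sources) ----------------
// The counting routines above all rely on the same invariant: every undirected edge is
// stored once as (u, v) with rank(u) < rank(v), and each adjacency list E[R[u]..R[u+1])
// is sorted. Under that invariant a triangle {u, v, w} is counted exactly once, at its
// lowest-ranked vertex u, by intersecting adj(u) with adj(v). A minimal host-side
// version of that intersection count (the function name is hypothetical; the kernels
// and hmlTriangleCountBySearch/ByHash add the linear-vs-binary-search switch and the
// per-thread hash variant on top of this):
#include <stdint.h>

uint64_t countTrianglesCsr(const uint32_t *R, const uint32_t *E, uint32_t numSrcVertices)
{
    uint64_t triangles = 0;
    for (uint32_t u = 0; u < numSrcVertices; ++u) {
        for (uint32_t eU = R[u]; eU + 1 < R[u + 1]; ++eU) {
            uint32_t v = E[eU];
            if (v >= numSrcVertices) continue;        // v stores no outgoing adjacency
            uint32_t eV = R[v], endV = R[v + 1];
            for (uint32_t eU2 = eU + 1; eU2 < R[u + 1]; ++eU2) {
                uint32_t w = E[eU2];                  // candidate third vertex; adj(u) is sorted
                while (eV < endV && E[eV] < w) ++eV;  // advance merge pointer in adj(v)
                if (eV == endV) break;
                if (E[eV] == w) ++triangles;          // edge (v, w) exists -> triangle {u, v, w}
            }
        }
    }
    return triangles;
}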
db182addc51fd470f2558a639c59d835ab3246f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * The MIT License * * Copyright (c) 1997-2019 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <CCA/Components/Schedulers/GPUDataWarehouse.h> #include <Core/Grid/Variables/GPUGridVariable.h> #include <Core/Parallel/Parallel.h> #include <Core/Util/GPU.h> #include <sci_defs/cuda_defs.h> namespace Uintah { //______________________________________________________________________ // // @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver // @param patchID the patch this kernel will operate over // @param matlIndex the material associated with the specified patchID // @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z) // @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z) // @param old_gpudw the old GPU DataWarehouse // @param new_gpudw the new GPU DataWarehouse __global__ void unifiedSchedulerTestKernel( int patchID , uint3 patchNodeLowIndex , uint3 patchNodeHighIndex , uint3 domainLow , uint3 domainHigh , GPUDataWarehouse * old_gpudw , GPUDataWarehouse * new_gpudw , hipStream_t * stream ) { const GPUGridVariable<double> phi; GPUGridVariable<double> newphi; old_gpudw->get(phi, "phi", patchID, 0, 0); new_gpudw->getModifiable(newphi, "phi", patchID, 0); // calculate the thread indices int i = blockDim.x * blockIdx.x + threadIdx.x + patchNodeLowIndex.x; int j = blockDim.y * blockIdx.y + threadIdx.y + patchNodeLowIndex.y; // If the threads are within the bounds of the patch the algorithm is allowed to stream along the z direction // applying the stencil to a line of cells. The z direction is streamed because it allows access of x and y // elements that are close to one another which should allow coalesced memory accesses. // Copy all face cells (any on an exposed face). // These outer cells don't get computed, just preserved across iterations. 
// newphi(i,j,k) = phi(i,j,k) if (i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) { if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) || /*left face*/ (domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) || /*bottom face*/ (patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) || /*right face*/ (patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) { /*top face*/ for (int k = domainLow.z; k < domainHigh.z; k++) { newphi(i,j,k) = phi(i,j,k); } } if (domainLow.z - patchNodeLowIndex.z == 1) { newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z); } if (patchNodeHighIndex.z - domainHigh.z == 1) { newphi(i,j,patchNodeHighIndex.z-1) = phi(i,j,patchNodeHighIndex.z-1); } } __syncthreads(); if (i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) { for (int k = domainLow.z; k < domainHigh.z; k++) { newphi(i,j,k) = (1. / 6) * (phi(i-1, j, k) + phi(i+1, j, k) + phi(i, j-1, k) + phi(i, j+1, k) + phi(i, j, k-1) + phi(i, j, k+1)); } } } void launchUnifiedSchedulerTestKernel( dim3 dimGrid , dim3 dimBlock , hipStream_t * stream , int patchID , uint3 patchNodeLowIndex , uint3 patchNodeHighIndex , uint3 domainLow , uint3 domainHigh , GPUDataWarehouse * old_gpudw , GPUDataWarehouse * new_gpudw ) { hipLaunchKernelGGL(( unifiedSchedulerTestKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream, patchID , patchNodeLowIndex , patchNodeHighIndex , domainLow , domainHigh , old_gpudw , new_gpudw , stream ); //hipDeviceSynchronize(); } } //end namespace Uintah
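launchUnifiedSchedulerTestKernel above leaves the grid and block geometry to the caller; the kernel guards against threads that fall outside the patch, so the launch only has to cover the patch's x/y extent (the z direction is streamed inside the kernel). The following is a hedged host-side sketch of one plausible way to size and issue the launch in the HIP build; the 16x16 block shape, the stream handling, and the dwOld/dwNew GPUDataWarehouse pointers are illustrative assumptions, not taken from the Uintah scheduler.

/* Illustrative only: one plausible caller for the wrapper above. Assumes the
 * same headers as the file above; block shape, stream handling, and the
 * dwOld/dwNew pointers are assumptions, not Uintah scheduler code. */
void launchPoissonPatchSketch(int patchID,
                              uint3 low, uint3 high,          /* patch node extents */
                              uint3 domainLow, uint3 domainHigh,
                              GPUDataWarehouse *dwOld,
                              GPUDataWarehouse *dwNew)
{
  const int blockX = 16, blockY = 16;                          /* assumed block shape */
  dim3 dimBlock(blockX, blockY, 1);
  dim3 dimGrid((high.x - low.x + blockX - 1) / blockX,         /* cover the x extent */
               (high.y - low.y + blockY - 1) / blockY,         /* cover the y extent */
               1);                                             /* z is streamed in-kernel */

  hipStream_t stream;
  hipStreamCreate(&stream);
  launchUnifiedSchedulerTestKernel(dimGrid, dimBlock, &stream,
                                   patchID, low, high,
                                   domainLow, domainHigh,
                                   dwOld, dwNew);
  hipStreamSynchronize(stream);
  hipStreamDestroy(stream);
}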
db182addc51fd470f2558a639c59d835ab3246f3.cu
/* * The MIT License * * Copyright (c) 1997-2019 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <CCA/Components/Schedulers/GPUDataWarehouse.h> #include <Core/Grid/Variables/GPUGridVariable.h> #include <Core/Parallel/Parallel.h> #include <Core/Util/GPU.h> #include <sci_defs/cuda_defs.h> namespace Uintah { //______________________________________________________________________ // // @brief A GPU kernel for the Jacobi iterations in the Poisson 1-material solver // @param patchID the patch this kernel will operate over // @param matlIndex the material associated with the specified patchID // @param domainLow a three component vector that gives the lower corner of the work area as (x,y,z) // @param domainHigh a three component vector that gives the highest corner of the work area as (x,y,z) // @param old_gpudw the old GPU DataWarehouse // @param new_gpudw the new GPU DataWarehouse __global__ void unifiedSchedulerTestKernel( int patchID , uint3 patchNodeLowIndex , uint3 patchNodeHighIndex , uint3 domainLow , uint3 domainHigh , GPUDataWarehouse * old_gpudw , GPUDataWarehouse * new_gpudw , cudaStream_t * stream ) { const GPUGridVariable<double> phi; GPUGridVariable<double> newphi; old_gpudw->get(phi, "phi", patchID, 0, 0); new_gpudw->getModifiable(newphi, "phi", patchID, 0); // calculate the thread indices int i = blockDim.x * blockIdx.x + threadIdx.x + patchNodeLowIndex.x; int j = blockDim.y * blockIdx.y + threadIdx.y + patchNodeLowIndex.y; // If the threads are within the bounds of the patch the algorithm is allowed to stream along the z direction // applying the stencil to a line of cells. The z direction is streamed because it allows access of x and y // elements that are close to one another which should allow coalesced memory accesses. // Copy all face cells (any on an exposed face). // These outer cells don't get computed, just preserved across iterations. 
// newphi(i,j,k) = phi(i,j,k) if (i >= patchNodeLowIndex.x && j >= patchNodeLowIndex.y && i < patchNodeHighIndex.x && j < patchNodeHighIndex.y ) { if ((domainLow.x - patchNodeLowIndex.x == 1 && i == patchNodeLowIndex.x) || /*left face*/ (domainLow.y - patchNodeLowIndex.y == 1 && j == patchNodeLowIndex.y) || /*bottom face*/ (patchNodeHighIndex.x - domainHigh.x == 1 && i == patchNodeHighIndex.x - 1) || /*right face*/ (patchNodeHighIndex.y - domainHigh.y == 1 && j == patchNodeHighIndex.y - 1)) { /*top face*/ for (int k = domainLow.z; k < domainHigh.z; k++) { newphi(i,j,k) = phi(i,j,k); } } if (domainLow.z - patchNodeLowIndex.z == 1) { newphi(i,j,patchNodeLowIndex.z) = phi(i,j,patchNodeLowIndex.z); } if (patchNodeHighIndex.z - domainHigh.z == 1) { newphi(i,j,patchNodeHighIndex.z-1) = phi(i,j,patchNodeHighIndex.z-1); } } __syncthreads(); if (i >= domainLow.x && j >= domainLow.y && i < domainHigh.x && j < domainHigh.y ) { for (int k = domainLow.z; k < domainHigh.z; k++) { newphi(i,j,k) = (1. / 6) * (phi(i-1, j, k) + phi(i+1, j, k) + phi(i, j-1, k) + phi(i, j+1, k) + phi(i, j, k-1) + phi(i, j, k+1)); } } } void launchUnifiedSchedulerTestKernel( dim3 dimGrid , dim3 dimBlock , cudaStream_t * stream , int patchID , uint3 patchNodeLowIndex , uint3 patchNodeHighIndex , uint3 domainLow , uint3 domainHigh , GPUDataWarehouse * old_gpudw , GPUDataWarehouse * new_gpudw ) { unifiedSchedulerTestKernel<<< dimGrid, dimBlock, 0, *stream>>>( patchID , patchNodeLowIndex , patchNodeHighIndex , domainLow , domainHigh , old_gpudw , new_gpudw , stream ); //cudaDeviceSynchronize(); } } //end namespace Uintah
04cbe9b906e04a24460489ee009f8482bac42272.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] += y[index]; } } void Fadd_inplace_impl(dtype * x, const dtype *y, int size) { hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size); //hipDeviceSynchronize(); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = x[index]; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += y[idx][idy]; offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, count, size); //hipDeviceSynchronize(); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype** x, dtype** y, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; x[idx][idy] += y[idx][idy]; } } void Fadd_inplace_impl(dtype** x, dtype** y, int dim0, int size) { hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, dim0, size); //hipDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype* x, dtype** y, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = x[index]; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += y[idx][idy]; offset += size; } x[index] = sum; } } void Fadd_inplace_impl(dtype* x, dtype** y, int count, int size) { hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, count, size); //hipDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype** x, dtype*** y, int n, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int x_idx = index / dim0; int x_idy = index % dim0; x[x_idx][x_idy] = 0; int y_idx = x_idx; int y_idz = x_idy; for(int i = 0; i < n; i++) { x[x_idx][x_idy] += y[y_idx][i][y_idz]; } } } void Fadd_inplace_impl(dtype** x, 
dtype*** y, int count, int n, int dim0) { int size = count * dim0; hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, n, dim0, size); //hipError_t res = hipGetLastError(); //std::cout << hipGetErrorString(res) << std::endl; } __global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fsubtract_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fsubtract_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] -= y[index]; } } void Fsubtract_inplace_impl(dtype* x, const dtype* y, int size) { hipLaunchKernelGGL(( Fsubtract_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] *= y[index]; } } void Fmultiply_inplace_impl(dtype* x, const dtype* y, int size) { hipLaunchKernelGGL(( Fmultiply_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_inplace_kernel(dtype** x, dtype** y, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; x[idx][idy] *= y[idx][idy]; } } void Fmultiply_inplace_impl(dtype** x, dtype** y, int dim0, int size) { hipLaunchKernelGGL(( Fmultiply_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, dim0, size); //hipDeviceSynchronize(); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fdivide_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { 
int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype* scalar, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * scalar[0]; } } void Fmultiply_scalar_impl(const dtype* x, const dtype* scalar, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, scalar, r, size); //hipDeviceSynchronize(); } __global__ void Fmultiply_scalar_inplace_kernel(dtype* x, const dtype y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] *= y; } } void Fmultiply_scalar_inplace_impl(dtype* x, const dtype y, int size) { hipLaunchKernelGGL(( Fmultiply_scalar_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size); //hipDeviceSynchronize(); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fadd_scalar_inplace_kernel(dtype* x, const dtype y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] += y; } } void Fadd_scalar_inplace_impl(dtype* x, const dtype y, int size) { hipLaunchKernelGGL(( Fadd_scalar_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size); //hipDeviceSynchronize(); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsquare_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); //hipDeviceSynchronize(); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); //hipDeviceSynchronize(); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % 
dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); //hipDeviceSynchronize(); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); //hipDeviceSynchronize(); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); //hipDeviceSynchronize(); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsqrt_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); //hipDeviceSynchronize(); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( concat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); //hipDeviceSynchronize(); } __global__ void concat_kernel(dtype **src, dtype* dst, int src_dim, int dst_dim) { int index = threadIdx.x + blockIdx.x * 
blockDim.x; if(index < dst_dim) { int idx = index / src_dim; int idy = index % src_dim; dst[index] = src[idx][idy]; } } void concat_impl(dtype **src, dtype* dst, int src_dim, int dst_dim) { hipLaunchKernelGGL(( concat_kernel), dim3((dst_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, src_dim, dst_dim); //hipDeviceSynchronize(); } __global__ void concat_kernel(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_size) { int s_idx = index / (n * src_dim); int s_idy = (index - s_idx * n * src_dim) / src_dim; int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim); //printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]); int d_idx = s_idx; int d_idy = index % (n * src_dim); //printf("id:%d:\n", index); dst[d_idx][d_idy] = src[s_idx][s_idy][s_idz]; } } void concat_impl(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) { hipLaunchKernelGGL(( concat_kernel), dim3((src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, src_dim, src_size, dst); //hipDeviceSynchronize(); } __global__ void unconcat_kernel(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_size) { int s_idx = index / (n * src_dim); int s_idy = (index - s_idx * n * src_dim) / src_dim; int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim); //printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]); int d_idx = s_idx; int d_idy = index % (n * src_dim); //printf("id:%d:\n", index); dst[s_idx][s_idy][s_idz] = src[d_idx][d_idy]; } } void unconcat_impl(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) { hipLaunchKernelGGL(( unconcat_kernel), dim3((src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, src_dim, src_size, dst); //hipDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( unconcat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); //hipDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype **dst, int src_dim, int dst_dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_dim) { int idx = index / dst_dim; int idy = index % dst_dim; dst[idx][idy] = src[index]; } } void unconcat_impl(const dtype *src, dtype** dst, int src_dim, int dst_dim) { hipLaunchKernelGGL(( unconcat_kernel), dim3((src_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, src_dim, dst_dim); //hipDeviceSynchronize(); } __global__ void Fconcat_kernel(dtype ***src, int count, int n, int *offset_ptr, int *dims, int max_dim, int dst_size, dtype **dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dst_size) { int dst_idx = index / (n * max_dim); int src_idx = dst_idx; int src_idy = index % (n * max_dim) / max_dim; int src_idz = index % (n * max_dim) % max_dim; //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); if(src_idz < dims[src_idy]) { int dst_idy = offset_ptr[src_idy] + src_idz; 
dst[dst_idx][dst_idy] = src[src_idx][src_idy][src_idz]; //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); } //printf("%d\n", offset_ptr[index]); } } void Fconcat_impl(dtype ***src, int count, int n, int *offset_ptr, int *dims, int max_dim, dtype **dst) { int dst_size = count * n * max_dim; hipLaunchKernelGGL(( Fconcat_kernel), dim3((dst_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, offset_ptr, dims, max_dim, dst_size, dst); //cout << "Error: " << hipGetErrorString(hipGetLastError()) << endl; //hipDeviceSynchronize(); } __global__ void Dconcat_kernel(dtype **loss, int count, int n, int *offset_ptr, int *dims, int max_dim, int loss_size, dtype ***in_loss) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < loss_size) { int dst_idx = index / (n * max_dim); int src_idx = dst_idx; int src_idy = index % (n * max_dim) / max_dim; int src_idz = index % (n * max_dim) % max_dim; if(src_idz < dims[src_idy]) { int dst_idy = offset_ptr[src_idy] + src_idz; atomicAdd(in_loss[src_idx][src_idy] + src_idz, loss[dst_idx][dst_idy]); //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); } //in_loss[src_idx][src_idy][src_idz] += loss[dst_idx][dst_idy]; } } void Dconcat_impl(dtype **loss, int count, int n, int *offset_ptr, int *dims, int max_dim, dtype ***in_loss) { int loss_size = count * n * max_dim; hipLaunchKernelGGL(( Dconcat_kernel), dim3((loss_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, loss, count, n, offset_ptr, dims, max_dim, loss_size, in_loss); //hipDeviceSynchronize(); } /* __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size); //hipDeviceSynchronize(); } */ __global__ void set_kernel(dtype **x, int* dims, int n, int max_dim, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int idx = index / max_dim; int idy = index % max_dim; if (idx < n && idy < dims[idx]) { x[idx][idy] = val; } } void set_impl(dtype **x, int* dims, int n, int max_dim, dtype val) { int size = n * max_dim; hipLaunchKernelGGL(( set_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dims, n, max_dim, val); //hipDeviceSynchronize(); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { hipLaunchKernelGGL(( set_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, col, size, val); //hipDeviceSynchronize(); } __global__ void set_cols_kernel(dtype* x, int dim0, int* cols, int col_num, dtype* val, int val_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < val_size) { int col_num = cols[index / dim0]; int offset = index % dim0; x[col_num * dim0 + offset] = val[index]; } } void set_cols_impl(dtype* x, int dim0, int* cols, int col_num, dtype* val) { int val_size = col_num * dim0; hipLaunchKernelGGL(( set_cols_kernel), dim3((val_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 
0, 0, x, dim0, cols, col_num, val, val_size); //hipDeviceSynchronize(); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( FLookup_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); //hipDeviceSynchronize(); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { hipLaunchKernelGGL(( DLookup_kernel), dim3((l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gx, loss, gxdim0, gxdim1, l_size, cols, col_num); //hipDeviceSynchronize(); } __global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( get_cols_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); //hipDeviceSynchronize(); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { hipLaunchKernelGGL(( get_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, col, size); //hipDeviceSynchronize(); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { hipLaunchKernelGGL(( Fadd_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, col, dim0, size); //hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; 
for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Favgpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //hipDeviceSynchronize(); } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { hipLaunchKernelGGL(( Davgpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, n, gx); //hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fsumpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //hipDeviceSynchronize(); } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { hipLaunchKernelGGL(( Dsumpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, gx); //hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int 
index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fmaxpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //hipDeviceSynchronize(); } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dmaxpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); //hipDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fminpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //hipDeviceSynchronize(); } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void 
Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dminpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); //hipDeviceSynchronize(); }
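The *_impl wrappers above hide the launch geometry (one thread per element with THREADS_PER_BLOCK = 1024) but leave allocation and host/device transfers to the caller. Below is a minimal hedged usage sketch for the element-wise Fadd_impl in this HIP build; it assumes dtype is float (the actual typedef lives in kernel_hip.cuh, which is not shown here) and omits error checking.

/* Illustrative usage of Fadd_impl; assumes dtype is float (the real typedef
 * is in kernel_hip.cuh) and that hip/hip_runtime.h plus the kernel header
 * are included as in the file above. Error checking omitted for brevity. */
void addVectorsSketch(const float *hostX, const float *hostY,
                      float *hostR, int size)
{
  float *dX = nullptr, *dY = nullptr, *dR = nullptr;
  size_t bytes = (size_t)size * sizeof(float);

  hipMalloc((void**)&dX, bytes);
  hipMalloc((void**)&dY, bytes);
  hipMalloc((void**)&dR, bytes);
  hipMemcpy(dX, hostX, bytes, hipMemcpyHostToDevice);
  hipMemcpy(dY, hostY, bytes, hipMemcpyHostToDevice);

  Fadd_impl(dX, dY, dR, size);        /* r[i] = x[i] + y[i] on the device */

  hipMemcpy(hostR, dR, bytes, hipMemcpyDeviceToHost);  /* syncs the default stream */
  hipFree(dX);
  hipFree(dY);
  hipFree(dR);
}

The other element-wise wrappers (Fsubtract_impl, Fmultiply_impl, Ftanh_impl, and so on) follow the same allocate/copy/launch/copy-back pattern.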
04cbe9b906e04a24460489ee009f8482bac42272.cu
#include "kernel.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] += y[index]; } } void Fadd_inplace_impl(dtype * x, const dtype *y, int size) { Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size); //cudaDeviceSynchronize(); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = x[index]; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += y[idx][idy]; offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, count, size); //cudaDeviceSynchronize(); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype** x, dtype** y, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; x[idx][idy] += y[idx][idy]; } } void Fadd_inplace_impl(dtype** x, dtype** y, int dim0, int size) { Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype* x, dtype** y, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = x[index]; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += y[idx][idy]; offset += size; } x[index] = sum; } } void Fadd_inplace_impl(dtype* x, dtype** y, int count, int size) { Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, count, size); //cudaDeviceSynchronize(); } __global__ void Fadd_inplace_kernel(dtype** x, dtype*** y, int n, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int x_idx = index / dim0; int x_idy = index % dim0; x[x_idx][x_idy] = 0; int y_idx = x_idx; int y_idz = x_idy; for(int i = 0; i < n; i++) { x[x_idx][x_idy] += y[y_idx][i][y_idz]; } } } void Fadd_inplace_impl(dtype** x, dtype*** y, int count, int n, int dim0) { int size = count * dim0; Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, n, dim0, size); //cudaError_t res = cudaGetLastError(); //std::cout << cudaGetErrorString(res) << std::endl; } __global__ void 
Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fsubtract_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fsubtract_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] -= y[index]; } } void Fsubtract_inplace_impl(dtype* x, const dtype* y, int size) { Fsubtract_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_inplace_kernel(dtype* x, const dtype* y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] *= y[index]; } } void Fmultiply_inplace_impl(dtype* x, const dtype* y, int size) { Fmultiply_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_inplace_kernel(dtype** x, dtype** y, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; x[idx][idy] *= y[idx][idy]; } } void Fmultiply_inplace_impl(dtype** x, dtype** y, int dim0, int size) { Fmultiply_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fdivide_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype* scalar, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * scalar[0]; } } void Fmultiply_scalar_impl(const dtype* x, const 
dtype* scalar, dtype* r, int size) { Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, scalar, r, size); //cudaDeviceSynchronize(); } __global__ void Fmultiply_scalar_inplace_kernel(dtype* x, const dtype y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] *= y; } } void Fmultiply_scalar_inplace_impl(dtype* x, const dtype y, int size) { Fmultiply_scalar_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size); //cudaDeviceSynchronize(); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fadd_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fadd_scalar_inplace_kernel(dtype* x, const dtype y, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { x[index] += y; } } void Fadd_scalar_inplace_impl(dtype* x, const dtype y, int size) { Fadd_scalar_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size); //cudaDeviceSynchronize(); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { Fsquare_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); //cudaDeviceSynchronize(); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); //cudaDeviceSynchronize(); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; 
if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); //cudaDeviceSynchronize(); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const dtype* x, dtype* r, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); //cudaDeviceSynchronize(); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); //cudaDeviceSynchronize(); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { Fsqrt_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); //cudaDeviceSynchronize(); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { concat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); //cudaDeviceSynchronize(); } __global__ void concat_kernel(dtype **src, dtype* dst, int src_dim, int dst_dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dst_dim) { int idx = index / src_dim; int idy = index % src_dim; dst[index] = src[idx][idy]; } } void concat_impl(dtype **src, dtype* dst, int src_dim, int dst_dim) { concat_kernel<<<(dst_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, src_dim, dst_dim); //cudaDeviceSynchronize(); } __global__ void concat_kernel(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_size) { int s_idx = index / (n * src_dim); int s_idy = (index - s_idx * n * src_dim) / src_dim; int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim); //printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]); int d_idx = s_idx; int d_idy = index % (n * src_dim); //printf("id:%d:\n", index); dst[d_idx][d_idy] = src[s_idx][s_idy][s_idz]; } } void concat_impl(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) { concat_kernel<<<(src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, src_dim, src_size, dst); //cudaDeviceSynchronize(); } 
__global__ void unconcat_kernel(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_size) { int s_idx = index / (n * src_dim); int s_idy = (index - s_idx * n * src_dim) / src_dim; int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim); //printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]); int d_idx = s_idx; int d_idy = index % (n * src_dim); //printf("id:%d:\n", index); dst[s_idx][s_idy][s_idz] = src[d_idx][d_idy]; } } void unconcat_impl(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) { unconcat_kernel<<<(src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, src_dim, src_size, dst); //cudaDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { unconcat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); //cudaDeviceSynchronize(); } __global__ void unconcat_kernel(const dtype *src, dtype **dst, int src_dim, int dst_dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < src_dim) { int idx = index / dst_dim; int idy = index % dst_dim; dst[idx][idy] = src[index]; } } void unconcat_impl(const dtype *src, dtype** dst, int src_dim, int dst_dim) { unconcat_kernel<<<(src_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, src_dim, dst_dim); //cudaDeviceSynchronize(); } __global__ void Fconcat_kernel(dtype ***src, int count, int n, int *offset_ptr, int *dims, int max_dim, int dst_size, dtype **dst) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dst_size) { int dst_idx = index / (n * max_dim); int src_idx = dst_idx; int src_idy = index % (n * max_dim) / max_dim; int src_idz = index % (n * max_dim) % max_dim; //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); if(src_idz < dims[src_idy]) { int dst_idy = offset_ptr[src_idy] + src_idz; dst[dst_idx][dst_idy] = src[src_idx][src_idy][src_idz]; //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); } //printf("%d\n", offset_ptr[index]); } } void Fconcat_impl(dtype ***src, int count, int n, int *offset_ptr, int *dims, int max_dim, dtype **dst) { int dst_size = count * n * max_dim; Fconcat_kernel<<<(dst_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, offset_ptr, dims, max_dim, dst_size, dst); //cout << "Error: " << cudaGetErrorString(cudaGetLastError()) << endl; //cudaDeviceSynchronize(); } __global__ void Dconcat_kernel(dtype **loss, int count, int n, int *offset_ptr, int *dims, int max_dim, int loss_size, dtype ***in_loss) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < loss_size) { int dst_idx = index / (n * max_dim); int src_idx = dst_idx; int src_idy = index % (n * max_dim) / max_dim; int src_idz = index % (n * max_dim) % max_dim; if(src_idz < dims[src_idy]) { int dst_idy = offset_ptr[src_idy] + src_idz; atomicAdd(in_loss[src_idx][src_idy] + src_idz, loss[dst_idx][dst_idy]); //printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz); } //in_loss[src_idx][src_idy][src_idz] += loss[dst_idx][dst_idy]; } } void Dconcat_impl(dtype **loss, int count, int n, int *offset_ptr, int *dims, int max_dim, dtype ***in_loss) { int loss_size = count * 
n * max_dim; Dconcat_kernel<<<(loss_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(loss, count, n, offset_ptr, dims, max_dim, loss_size, in_loss); //cudaDeviceSynchronize(); } /* __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size); //cudaDeviceSynchronize(); } */ __global__ void set_kernel(dtype **x, int* dims, int n, int max_dim, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int idx = index / max_dim; int idy = index % max_dim; if (idx < n && idy < dims[idx]) { x[idx][idy] = val; } } void set_impl(dtype **x, int* dims, int n, int max_dim, dtype val) { int size = n * max_dim; set_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dims, n, max_dim, val); //cudaDeviceSynchronize(); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { set_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, col, size, val); //cudaDeviceSynchronize(); } __global__ void set_cols_kernel(dtype* x, int dim0, int* cols, int col_num, dtype* val, int val_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < val_size) { int col_num = cols[index / dim0]; int offset = index % dim0; x[col_num * dim0 + offset] = val[index]; } } void set_cols_impl(dtype* x, int dim0, int* cols, int col_num, dtype* val) { int val_size = col_num * dim0; set_cols_kernel<<<(val_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, cols, col_num, val, val_size); //cudaDeviceSynchronize(); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { FLookup_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); //cudaDeviceSynchronize(); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { DLookup_kernel<<<(l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (gx, loss, gxdim0, gxdim1, l_size, cols, col_num); //cudaDeviceSynchronize(); } 
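// A minimal host-side sketch (not part of the original file) of the lookup pair
// defined above: FLookup_impl gathers columns of a column-major xdim0 x xdim1
// table into per-id output vectors, and DLookup_impl scatters gradients back
// with atomicAdd so repeated ids accumulate correctly. Argument names are
// illustrative; d_out and d_out_loss must be device-resident arrays of n_ids
// device pointers, each pointing at an emb_dim-sized vector.
void lookup_pair_sketch(const dtype *d_table, dtype *d_grad_table,
                        int emb_dim, int vocab,
                        int *d_ids, int n_ids,
                        dtype **d_out, dtype **d_out_loss) {
    FLookup_impl(d_table, d_out, emb_dim, vocab, n_ids * emb_dim, d_ids, n_ids);           // forward gather
    DLookup_impl(d_grad_table, d_out_loss, emb_dim, vocab, n_ids * emb_dim, d_ids, n_ids); // backward scatter-add
}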
__global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { get_cols_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); //cudaDeviceSynchronize(); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { get_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, col, size); //cudaDeviceSynchronize(); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { Fadd_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, col, dim0, size); //cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Favgpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //cudaDeviceSynchronize(); } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { Davgpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, n, gx); //cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; 
temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fsumpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //cudaDeviceSynchronize(); } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { Dsumpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, gx); //cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fmaxpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //cudaDeviceSynchronize(); } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dmaxpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); //cudaDeviceSynchronize(); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int 
temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fminpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } //cudaDeviceSynchronize(); } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dminpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); //cudaDeviceSynchronize(); }
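// A minimal, self-contained usage sketch (not part of the original file) showing
// how the dtype** arguments used throughout this file can be prepared on the
// host: the kernels expect a device-resident array of device pointers, so the
// pointer table itself has to be copied to the GPU. Buffer names are
// illustrative; the call assumes elementwise max-pooling of n input vectors of
// size dim, i.e. r = s = dim (one block per output element).
void fmaxpooling_usage_sketch(int n /*inputs*/, int dim /*per-input size*/) {
    dtype **h_ptrs = new dtype*[n];
    for (int i = 0; i < n; ++i)
        cudaMalloc((void**)&h_ptrs[i], dim * sizeof(dtype));        // one device vector per input

    dtype **d_ptrs = NULL;                                          // device copy of the pointer table
    cudaMalloc((void**)&d_ptrs, n * sizeof(dtype*));
    cudaMemcpy(d_ptrs, h_ptrs, n * sizeof(dtype*), cudaMemcpyHostToDevice);

    dtype *d_y = NULL; int *d_argmax = NULL;                        // pooled output and argmax positions
    cudaMalloc((void**)&d_y, dim * sizeof(dtype));
    cudaMalloc((void**)&d_argmax, dim * sizeof(int));

    Fmaxpooling_impl(d_ptrs, d_y, n, dim, dim, d_argmax);
    cudaDeviceSynchronize();                                        // the impls launch asynchronously
    delete[] h_ptrs;
}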
ddaaa3aa6787622c68252e93ef22984679b4fe28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void big_kernel(Cell *cells, int *cnumPars,Cell *cells2, int *cnumPars2,struct kernel_consts *dev,int *border) { int ix; int iy; int iz; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int nz = blockDim.z * gridDim.z; ix = blockIdx.x * blockDim.x + threadIdx.x; iy = blockIdx.y * blockDim.y + threadIdx.y; iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("x: %d : %d\n",nx,blockDim.x * gridDim.x); //printf("y: %d : %d\n",ny,blockDim.y * gridDim.y); //printf("z: %d : %d\n",nz,blockDim.z * gridDim.z); //move common declarations on top int index = (iz*ny + iy)*nx + ix; int np; //internal loop limit //this should be moved to shared memory Cell &cell = cells[index]; //just a reference to the correspondig cell //FIXME int neighCells[27]; //it is safe to move the call here, neighbours do not change between the two original calls //move this computation to cpu //const float tc_orig = hSq*hSq*hSq; const float parSize = 0.0002f; const float epsilon = 1e-10f; const float stiffness = 30000.f; const float damping = 128.f; /* for (i=0;i<27;i++) { neighCells[i] = 0xffffffff; } */ int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells,cnumPars); /* //printf("thread %d: number of neighbors: %d\n",index,numNeighCells); for (int i=0;i<numNeighCells;i++) { printf("thread %d : %d-th neighbor %d\n",index,i,neighCells[i]); } */ //////////////////////////////////////////////////////////////////////////////// //void ClearParticlesMT(int i) { //////////////////////////////////////////////////////////////////////////////// /**/ // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; cnumPars[index] = 0; // } //close nested loop; __syncthreads(); //} close ClearParticlesMT() //////////////////////////////////////////////////////////////////////////////// //void RebuildGridMT(int i) { // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; Cell const &cell2 = cells2[index]; int np2 = cnumPars2[index]; for (int j = 0; j < np2; ++j) { int ci = (int)((cell2.p[j].x - domainMin.x) / dev->delta.x); int cj = (int)((cell2.p[j].y - domainMin.y) / dev->delta.y); int ck = (int)((cell2.p[j].z - domainMin.z) / dev->delta.z); if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1; if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1; if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1; int index2 = (ck*ny + cj)*nx + ci; // this assumes that particles cannot travel more than one grid cell per time step int np_renamed = cnumPars[index2]; if (border[index2]) { //use atomic atomicAdd(&cnumPars[index2],1); } else { cnumPars[index2]++; } //#warning what if we exceed CELL_PARTICLES particles per cell here?? 
//from what I see is that we calculate the same frame over and over //so every cell has at most CELL_PARTICLES particles, from the initialisation Cell &cell_renamed = cells[index2]; cell_renamed.p[np_renamed].x = cell2.p[j].x; cell_renamed.p[np_renamed].y = cell2.p[j].y; cell_renamed.p[np_renamed].z = cell2.p[j].z; cell_renamed.hv[np_renamed].x = cell2.hv[j].x; cell_renamed.hv[np_renamed].y = cell2.hv[j].y; cell_renamed.hv[np_renamed].z = cell2.hv[j].z; cell_renamed.v[np_renamed].x = cell2.v[j].x; cell_renamed.v[np_renamed].y = cell2.v[j].y; cell_renamed.v[np_renamed].z = cell2.v[j].z; //cell_renamed.debug[np_renamed] = index2; } // } //close nested loops __syncthreads(); //} close RebuildGridMT() //////////////////////////////////////////////////////////////////////////////// //void InitDensitiesAndForcesMT(int i) { //from now on we don't change the cnumPars[index] np = cnumPars[index]; //internal loop limit // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { cell.density[j] = 0.f; cell.a[j].x = externalAcceleration.x; cell.a[j].y = externalAcceleration.y; cell.a[j].z = externalAcceleration.z; } // } //close nested loops __syncthreads(); //} close InitDensitiesAndForcesMT() //////////////////////////////////////////////////////////////////////////////// //void ComputeDensitiesMT(int i) { // int neighCells[27]; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // int np = cnumPars[index]; // if (np == 0) continue; // // if np==0 we do net enter the following loop // int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells); // Cell &cell = cells[index]; Vec3 tmp; for (int j = 0; j < np; ++j) for (int inc = 0; inc < numNeighCells; ++inc) { int indexNeigh = neighCells[inc]; Cell &neigh = cells[indexNeigh]; int numNeighPars = cnumPars[indexNeigh]; for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh) if (&neigh.p[iparNeigh] < &cell.p[j]) { //float distSq = (cell.p[j] - neigh.p[iparNeigh]).GetLengthSq(); float distSq; operator_sub(&tmp,&cell.p[j],&neigh.p[iparNeigh]); distSq = GetLengthSq(&tmp); if (distSq < dev->hSq) { float t = dev->hSq - distSq; float tc = t*t*t; if (border[index]) { //use atomic atomicAdd(&cell.density[j],tc); } else { cell.density[j] += tc; } if (border[indexNeigh]) { //use atomic atomicAdd(&neigh.density[iparNeigh],tc); } else { neigh.density[iparNeigh] += tc; } } } ; } // } //close nested loops __syncthreads(); //} close ComputeDensitiesMT() //////////////////////////////////////////////////////////////////////////////// //void ComputeDensities2MT(int i) { // const float tc = hSq*hSq*hSq; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { cell.density[j] += dev->tc_orig; cell.density[j] *= dev->densityCoeff; } // } //close nested loops __syncthreads(); //} close ComputeDensities2MT() //////////////////////////////////////////////////////////////////////////////// //void ComputeForcesMT(int i) { // int neighCells[27]; // for (int iz = 
grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // int np = cnumPars[index]; // if (np == 0) continue; // // if np==0 we do net enter the following loop // int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells); // Cell &cell = cells[index]; for (int j = 0; j < np; ++j) for (int inc = 0; inc < numNeighCells; ++inc) { int indexNeigh = neighCells[inc]; Cell &neigh = cells[indexNeigh]; int numNeighPars = cnumPars[indexNeigh]; for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh) if (&neigh.p[iparNeigh] < &cell.p[j]) { //Vec3 disp = cell.p[j] - neigh.p[iparNeigh]; //float distSq = disp.GetLengthSq(); Vec3 disp; operator_sub(&disp,&cell.p[j],&neigh.p[iparNeigh]); float distSq = GetLengthSq(&disp); if (distSq < dev->hSq) { //float dist = sqrtf(::max(distSq, 1e-12f)); float dist = sqrtf(fmax(distSq, 1e-12f)); float hmr = dev->h - dist; //Vec3 acc = disp * pressureCoeff * (hmr*hmr/dist) * // (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity); //acc += (neigh.v[iparNeigh] - cell.v[j]) * viscosityCoeff * hmr; //acc /= cell.density[j] * neigh.density[iparNeigh]; Vec3 acc; operator_mult(&acc,&disp, dev->pressureCoeff * (hmr*hmr/dist) * (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity)); operator_sub(&tmp,&neigh.v[iparNeigh],&cell.v[j]); operator_mult(&tmp,&tmp,dev->viscosityCoeff * hmr); operator_add(&acc,&acc,&tmp); operator_div(&acc,&acc,cell.density[j] * neigh.density[iparNeigh]); if (border[index]) { //use atomics #warning this works because no one reads these values at the moment ?? atomicAdd(&cell.a[j].x,acc.x); atomicAdd(&cell.a[j].y,acc.y); atomicAdd(&cell.a[j].z,acc.z); } else { operator_add(&cell.a[j],&cell.a[j],&acc); } if (border[indexNeigh]) { //use atomics #warning this works because no one reads these values at the moment ?? 
//reminder: there is no atomicSub for floats, so we add the negative value atomicAdd(&neigh.a[iparNeigh].x,-acc.x); atomicAdd(&neigh.a[iparNeigh].y,-acc.y); atomicAdd(&neigh.a[iparNeigh].z,-acc.z); } else { operator_sub(&neigh.a[iparNeigh],&neigh.a[iparNeigh],&acc); } } } } // } //close nested loops __syncthreads(); //} close ComputeForcesMT() //////////////////////////////////////////////////////////////////////////////// //void ProcessCollisionsMT(int i) { // const float parSize = 0.0002f; // const float epsilon = 1e-10f; // const float stiffness = 30000.f; // const float damping = 128.f; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { //Vec3 pos = cell.p[j] + cell.hv[j] * timeStep; Vec3 pos; operator_mult(&pos,&cell.hv[j],timeStep); operator_add(&pos,&pos,&cell.p[j]); float diff = parSize - (pos.x - domainMin.x); if (diff > epsilon) cell.a[j].x += stiffness*diff - damping*cell.v[j].x; diff = parSize - (domainMax.x - pos.x); if (diff > epsilon) cell.a[j].x -= stiffness*diff + damping*cell.v[j].x; diff = parSize - (pos.y - domainMin.y); if (diff > epsilon) cell.a[j].y += stiffness*diff - damping*cell.v[j].y; diff = parSize - (domainMax.y - pos.y); if (diff > epsilon) cell.a[j].y -= stiffness*diff + damping*cell.v[j].y; diff = parSize - (pos.z - domainMin.z); if (diff > epsilon) cell.a[j].z += stiffness*diff - damping*cell.v[j].z; diff = parSize - (domainMax.z - pos.z); if (diff > epsilon) cell.a[j].z -= stiffness*diff + damping*cell.v[j].z; } // } //close nested loops __syncthreads(); //} close ProcessCollisionsMT() //////////////////////////////////////////////////////////////////////////////// //void AdvanceParticlesMT(int i) { // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { //Vec3 v_half = cell.hv[j] + cell.a[j]*timeStep; Vec3 v_half; operator_mult(&v_half,&cell.a[j],timeStep); operator_add(&v_half,&v_half,&cell.hv[j]); //cell.hv[j] = v_half; cell.hv[j].x = v_half.x; cell.hv[j].y = v_half.y; cell.hv[j].z = v_half.z; //cell.v[j] *= 0.5f; operator_mult(&cell.v[j],&cell.v[j],0.5f); //cell.v[j] = cell.hv[j] + v_half; operator_add(&cell.v[j],&cell.hv[j],&v_half); //we can change v_half now, (we want to use only one tmp variable) //cell.p[j] += v_half * timeStep; operator_mult(&v_half,&v_half,timeStep); operator_add(&cell.p[j],&cell.p[j],&v_half); } // } //close nested loops __syncthreads(); //} close AdvanceParticlesMT() //////////////////////////////////////////////////////////////////////////////// /**/ } //close big_kernel()
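// A minimal standalone sketch (not part of the original file) of the
// position-to-cell mapping used in the RebuildGridMT phase above: divide by the
// cell size and clamp to the grid, relying on the stated assumption that a
// particle moves at most one grid cell per time step. The helper names
// (clamp_cell_index, pos_to_cell) are illustrative only.
__device__ __forceinline__ int clamp_cell_index(int c, int ncells) {
    return c < 0 ? 0 : (c > ncells - 1 ? ncells - 1 : c);
}
__device__ __forceinline__ int pos_to_cell(float p, float pmin, float delta, int ncells) {
    return clamp_cell_index((int)((p - pmin) / delta), ncells);
}
// e.g. int ci = pos_to_cell(cell2.p[j].x, domainMin.x, dev->delta.x, nx);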
ddaaa3aa6787622c68252e93ef22984679b4fe28.cu
__global__ void big_kernel(Cell *cells, int *cnumPars,Cell *cells2, int *cnumPars2,struct kernel_consts *dev,int *border) { int ix; int iy; int iz; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int nz = blockDim.z * gridDim.z; ix = blockIdx.x * blockDim.x + threadIdx.x; iy = blockIdx.y * blockDim.y + threadIdx.y; iz = blockIdx.z * blockDim.z + threadIdx.z; //printf("x: %d : %d\n",nx,blockDim.x * gridDim.x); //printf("y: %d : %d\n",ny,blockDim.y * gridDim.y); //printf("z: %d : %d\n",nz,blockDim.z * gridDim.z); //move common declarations on top int index = (iz*ny + iy)*nx + ix; int np; //internal loop limit //this should be moved to shared memory Cell &cell = cells[index]; //just a reference to the correspondig cell //FIXME int neighCells[27]; //it is safe to move the call here, neighbours do not change between the two original calls //move this computation to cpu //const float tc_orig = hSq*hSq*hSq; const float parSize = 0.0002f; const float epsilon = 1e-10f; const float stiffness = 30000.f; const float damping = 128.f; /* for (i=0;i<27;i++) { neighCells[i] = 0xffffffff; } */ int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells,cnumPars); /* //printf("thread %d: number of neighbors: %d\n",index,numNeighCells); for (int i=0;i<numNeighCells;i++) { printf("thread %d : %d-th neighbor %d\n",index,i,neighCells[i]); } */ //////////////////////////////////////////////////////////////////////////////// //void ClearParticlesMT(int i) { //////////////////////////////////////////////////////////////////////////////// /**/ // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; cnumPars[index] = 0; // } //close nested loop; __syncthreads(); //} close ClearParticlesMT() //////////////////////////////////////////////////////////////////////////////// //void RebuildGridMT(int i) { // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; Cell const &cell2 = cells2[index]; int np2 = cnumPars2[index]; for (int j = 0; j < np2; ++j) { int ci = (int)((cell2.p[j].x - domainMin.x) / dev->delta.x); int cj = (int)((cell2.p[j].y - domainMin.y) / dev->delta.y); int ck = (int)((cell2.p[j].z - domainMin.z) / dev->delta.z); if (ci < 0) ci = 0; else if (ci > (nx-1)) ci = nx-1; if (cj < 0) cj = 0; else if (cj > (ny-1)) cj = ny-1; if (ck < 0) ck = 0; else if (ck > (nz-1)) ck = nz-1; int index2 = (ck*ny + cj)*nx + ci; // this assumes that particles cannot travel more than one grid cell per time step int np_renamed = cnumPars[index2]; if (border[index2]) { //use atomic atomicAdd(&cnumPars[index2],1); } else { cnumPars[index2]++; } //#warning what if we exceed CELL_PARTICLES particles per cell here?? 
//from what I see is that we calculate the same frame over and over //so every cell has at most CELL_PARTICLES particles, from the initialisation Cell &cell_renamed = cells[index2]; cell_renamed.p[np_renamed].x = cell2.p[j].x; cell_renamed.p[np_renamed].y = cell2.p[j].y; cell_renamed.p[np_renamed].z = cell2.p[j].z; cell_renamed.hv[np_renamed].x = cell2.hv[j].x; cell_renamed.hv[np_renamed].y = cell2.hv[j].y; cell_renamed.hv[np_renamed].z = cell2.hv[j].z; cell_renamed.v[np_renamed].x = cell2.v[j].x; cell_renamed.v[np_renamed].y = cell2.v[j].y; cell_renamed.v[np_renamed].z = cell2.v[j].z; //cell_renamed.debug[np_renamed] = index2; } // } //close nested loops __syncthreads(); //} close RebuildGridMT() //////////////////////////////////////////////////////////////////////////////// //void InitDensitiesAndForcesMT(int i) { //from now on we don't change the cnumPars[index] np = cnumPars[index]; //internal loop limit // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { cell.density[j] = 0.f; cell.a[j].x = externalAcceleration.x; cell.a[j].y = externalAcceleration.y; cell.a[j].z = externalAcceleration.z; } // } //close nested loops __syncthreads(); //} close InitDensitiesAndForcesMT() //////////////////////////////////////////////////////////////////////////////// //void ComputeDensitiesMT(int i) { // int neighCells[27]; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // int np = cnumPars[index]; // if (np == 0) continue; // // if np==0 we do net enter the following loop // int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells); // Cell &cell = cells[index]; Vec3 tmp; for (int j = 0; j < np; ++j) for (int inc = 0; inc < numNeighCells; ++inc) { int indexNeigh = neighCells[inc]; Cell &neigh = cells[indexNeigh]; int numNeighPars = cnumPars[indexNeigh]; for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh) if (&neigh.p[iparNeigh] < &cell.p[j]) { //float distSq = (cell.p[j] - neigh.p[iparNeigh]).GetLengthSq(); float distSq; operator_sub(&tmp,&cell.p[j],&neigh.p[iparNeigh]); distSq = GetLengthSq(&tmp); if (distSq < dev->hSq) { float t = dev->hSq - distSq; float tc = t*t*t; if (border[index]) { //use atomic atomicAdd(&cell.density[j],tc); } else { cell.density[j] += tc; } if (border[indexNeigh]) { //use atomic atomicAdd(&neigh.density[iparNeigh],tc); } else { neigh.density[iparNeigh] += tc; } } } ; } // } //close nested loops __syncthreads(); //} close ComputeDensitiesMT() //////////////////////////////////////////////////////////////////////////////// //void ComputeDensities2MT(int i) { // const float tc = hSq*hSq*hSq; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { cell.density[j] += dev->tc_orig; cell.density[j] *= dev->densityCoeff; } // } //close nested loops __syncthreads(); //} close ComputeDensities2MT() //////////////////////////////////////////////////////////////////////////////// //void ComputeForcesMT(int i) { // int neighCells[27]; // for (int iz = 
grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // int np = cnumPars[index]; // if (np == 0) continue; // // if np==0 we do net enter the following loop // int numNeighCells = InitNeighCellList(ix, iy, iz, neighCells); // Cell &cell = cells[index]; for (int j = 0; j < np; ++j) for (int inc = 0; inc < numNeighCells; ++inc) { int indexNeigh = neighCells[inc]; Cell &neigh = cells[indexNeigh]; int numNeighPars = cnumPars[indexNeigh]; for (int iparNeigh = 0; iparNeigh < numNeighPars; ++iparNeigh) if (&neigh.p[iparNeigh] < &cell.p[j]) { //Vec3 disp = cell.p[j] - neigh.p[iparNeigh]; //float distSq = disp.GetLengthSq(); Vec3 disp; operator_sub(&disp,&cell.p[j],&neigh.p[iparNeigh]); float distSq = GetLengthSq(&disp); if (distSq < dev->hSq) { //float dist = sqrtf(std::max(distSq, 1e-12f)); float dist = sqrtf(fmax(distSq, 1e-12f)); float hmr = dev->h - dist; //Vec3 acc = disp * pressureCoeff * (hmr*hmr/dist) * // (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity); //acc += (neigh.v[iparNeigh] - cell.v[j]) * viscosityCoeff * hmr; //acc /= cell.density[j] * neigh.density[iparNeigh]; Vec3 acc; operator_mult(&acc,&disp, dev->pressureCoeff * (hmr*hmr/dist) * (cell.density[j]+neigh.density[iparNeigh] - doubleRestDensity)); operator_sub(&tmp,&neigh.v[iparNeigh],&cell.v[j]); operator_mult(&tmp,&tmp,dev->viscosityCoeff * hmr); operator_add(&acc,&acc,&tmp); operator_div(&acc,&acc,cell.density[j] * neigh.density[iparNeigh]); if (border[index]) { //use atomics #warning this works because no one reads these values at the moment ?? atomicAdd(&cell.a[j].x,acc.x); atomicAdd(&cell.a[j].y,acc.y); atomicAdd(&cell.a[j].z,acc.z); } else { operator_add(&cell.a[j],&cell.a[j],&acc); } if (border[indexNeigh]) { //use atomics #warning this works because no one reads these values at the moment ?? 
//reminder: there is no atomicSub for floats, so we add the negative value atomicAdd(&neigh.a[iparNeigh].x,-acc.x); atomicAdd(&neigh.a[iparNeigh].y,-acc.y); atomicAdd(&neigh.a[iparNeigh].z,-acc.z); } else { operator_sub(&neigh.a[iparNeigh],&neigh.a[iparNeigh],&acc); } } } } // } //close nested loops __syncthreads(); //} close ComputeForcesMT() //////////////////////////////////////////////////////////////////////////////// //void ProcessCollisionsMT(int i) { // const float parSize = 0.0002f; // const float epsilon = 1e-10f; // const float stiffness = 30000.f; // const float damping = 128.f; // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { //Vec3 pos = cell.p[j] + cell.hv[j] * timeStep; Vec3 pos; operator_mult(&pos,&cell.hv[j],timeStep); operator_add(&pos,&pos,&cell.p[j]); float diff = parSize - (pos.x - domainMin.x); if (diff > epsilon) cell.a[j].x += stiffness*diff - damping*cell.v[j].x; diff = parSize - (domainMax.x - pos.x); if (diff > epsilon) cell.a[j].x -= stiffness*diff + damping*cell.v[j].x; diff = parSize - (pos.y - domainMin.y); if (diff > epsilon) cell.a[j].y += stiffness*diff - damping*cell.v[j].y; diff = parSize - (domainMax.y - pos.y); if (diff > epsilon) cell.a[j].y -= stiffness*diff + damping*cell.v[j].y; diff = parSize - (pos.z - domainMin.z); if (diff > epsilon) cell.a[j].z += stiffness*diff - damping*cell.v[j].z; diff = parSize - (domainMax.z - pos.z); if (diff > epsilon) cell.a[j].z -= stiffness*diff + damping*cell.v[j].z; } // } //close nested loops __syncthreads(); //} close ProcessCollisionsMT() //////////////////////////////////////////////////////////////////////////////// //void AdvanceParticlesMT(int i) { // for (int iz = grids[i].sz; iz < grids[i].ez; ++iz) // for (int iy = grids[i].sy; iy < grids[i].ey; ++iy) // for (int ix = grids[i].sx; ix < grids[i].ex; ++ix) { // int index = (iz*ny + iy)*nx + ix; // Cell &cell = cells[index]; // int np = cnumPars[index]; for (int j = 0; j < np; ++j) { //Vec3 v_half = cell.hv[j] + cell.a[j]*timeStep; Vec3 v_half; operator_mult(&v_half,&cell.a[j],timeStep); operator_add(&v_half,&v_half,&cell.hv[j]); //cell.hv[j] = v_half; cell.hv[j].x = v_half.x; cell.hv[j].y = v_half.y; cell.hv[j].z = v_half.z; //cell.v[j] *= 0.5f; operator_mult(&cell.v[j],&cell.v[j],0.5f); //cell.v[j] = cell.hv[j] + v_half; operator_add(&cell.v[j],&cell.hv[j],&v_half); //we can change v_half now, (we want to use only one tmp variable) //cell.p[j] += v_half * timeStep; operator_mult(&v_half,&v_half,timeStep); operator_add(&cell.p[j],&cell.p[j],&v_half); } // } //close nested loops __syncthreads(); //} close AdvanceParticlesMT() //////////////////////////////////////////////////////////////////////////////// /**/ } //close big_kernel()
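// The border-cell updates above exploit the fact that CUDA provides no
// atomicSub overload for float, so the kernel adds the negated value instead.
// A minimal helper capturing that idiom (the name atomicSubFloat is
// illustrative and not part of the original code):
__device__ __forceinline__ float atomicSubFloat(float *addr, float val) {
    // atomicAdd(float*, float) is available on compute capability 2.0+;
    // subtracting is simply adding the negation.
    return atomicAdd(addr, -val);
}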
4c42ac254ede5b54acc26f19178e814d8b6f0545.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <hip/hip_runtime.h> #include "ocuutil/float_routines.h" #include "ocuutil/thread.h" #include "ocustorage/grid3dops.h" #include "ocuequation/sol_selfadvection3d.h" //! This routine works because u,v,w,phi, and dphidt must all be padded so that they have the same memory layout, //! even though they have different dimensions. Then we can calculate indexing math once, and reuse it for //! all of the grids. template<typename T, typename INTERP> __global__ void Sol_SelfAdvection3D_apply_upwind( T *u, T *v, T *w, T *dudt, T *dvdt, T *dwdt, T invhx, T invhy, T invhz, int xstride, int ystride, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; if (i < nx && j < ny && k < nz) { // calc phi indexing int idx = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; T u_idx = u[idx]; T v_idx = v[idx]; T w_idx = w[idx]; //---- dudt ---- T u_iph_j_k = .5 * (u_idx + u[idx_pi]); T u_imh_j_k = .5 * (u_idx + u[idx_mi]); T duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , u[idx_pi])) - (u_imh_j_k * interp(u_imh_j_k, u[idx_mi], u_idx )); T v_atu_i_jph_k = .5* (v[idx_pj] + v[idx_pj - xstride]); T v_atu_i_jmh_k = .5* (v_idx + v[idx_mi]); T dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , u[idx_pj])) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, u[idx_mj], u_idx)); T w_atu_i_j_kph = .5* (w[idx_pk] + w[idx_pk - xstride]); T w_atu_i_j_kmh = .5* (w_idx + w[idx_mi]); T dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , u[idx_pk])) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, u[idx_mk], u_idx)); dudt[idx] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- T u_atv_iph_j_k = .5* (u[idx_pi] + u[idx_pi - ystride]); T u_atv_imh_j_k = .5* (u_idx + u[idx_mj]); T duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , v[idx_pi])) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, v[idx_mi], v_idx)); T v_i_jph_k = .5 * (v_idx + v[idx_pj]); T v_i_jmh_k = .5 * (v_idx + v[idx_mj]); T dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , v[idx_pj])) - (v_i_jmh_k * interp(v_i_jmh_k, v[idx_mj], v_idx)); T w_atv_i_j_kph = .5* (w[idx_pk] + w[idx_pk - ystride]); T w_atv_i_j_kmh = .5* (w_idx + w[idx_mj]); T dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , v[idx_pk])) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, v[idx_mk], v_idx)); dvdt[idx] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- T u_atw_iph_j_k = .5* (u[idx_pi] + u[idx_pi - 1]); T u_atw_imh_j_k = .5* (u_idx + u[idx_mk]); T duw = 
(u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , w[idx_pi])) - (u_atw_imh_j_k * interp(u_atw_imh_j_k, w[idx_mi], w_idx)); T v_atw_i_jph_k = .5* (v[idx_pj] + v[idx_pj - 1]); T v_atw_i_jmh_k = .5* (v_idx + v[idx_mk]); T dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , w[idx_pj])) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, w[idx_mj], w_idx)); T w_i_j_kph = .5 * (w_idx + w[idx_pk]); T w_i_j_kmh = .5 * (w_idx + w[idx_mk]); T dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , w[idx_pk])) - (w_i_j_kmh * interp(w_i_j_kmh, w[idx_mk], w_idx)); dwdt[idx] = -duw*invhx - dvw*invhy - dww*invhz; } } texture<float, 1, hipReadModeElementType> tex_u; texture<float, 1, hipReadModeElementType> tex_v; texture<float, 1, hipReadModeElementType> tex_w; __inline__ __device__ float tex1Dfetch_u(const int& i) { return tex1Dfetch(tex_u, i); } __inline__ __device__ float tex1Dfetch_v(const int& i) { return tex1Dfetch(tex_v, i); } __inline__ __device__ float tex1Dfetch_w(const int& i) { return tex1Dfetch(tex_w, i); } template<typename INTERP> __global__ void Advection3DF_apply_upwind_TEX( float *dudt, float *dvdt, float *dwdt, float invhx, float invhy, float invhz, int xstride, int ystride, int tex_offset, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; if (i < nx && j < ny && k < nz) { // calc phi indexing int idx_no_offset = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx = idx_no_offset + tex_offset; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; float u_idx = tex1Dfetch_u(idx); float v_idx = tex1Dfetch_v(idx); float w_idx = tex1Dfetch_w(idx); //---- dudt ---- float u_iph_j_k = .5f * (u_idx + tex1Dfetch_u(idx_pi)); float u_imh_j_k = .5f * (u_idx + tex1Dfetch_u(idx_mi)); float duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , tex1Dfetch_u(idx_pi))) - (u_imh_j_k * interp(u_imh_j_k, tex1Dfetch_u(idx_mi) , u_idx )); float v_atu_i_jph_k = .5f* (tex1Dfetch_v(idx_pj) + tex1Dfetch_v(idx_pj - xstride)); float v_atu_i_jmh_k = .5f* (v_idx + tex1Dfetch_v(idx_mi)); float dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , tex1Dfetch_u(idx_pj))) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, tex1Dfetch_u(idx_mj), u_idx)); float w_atu_i_j_kph = .5f* (tex1Dfetch_w(idx_pk) + tex1Dfetch_w(idx_pk - xstride)); float w_atu_i_j_kmh = .5f* (w_idx + tex1Dfetch_w(idx_mi)); float dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , tex1Dfetch_u(idx_pk))) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, tex1Dfetch_u(idx_mk), u_idx)); dudt[idx_no_offset] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- float u_atv_iph_j_k = .5f* (tex1Dfetch_u(idx_pi) + tex1Dfetch_u(idx_pi - ystride)); float u_atv_imh_j_k = .5f* (u_idx + tex1Dfetch_u(idx_mj)); float duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , tex1Dfetch_v(idx_pi))) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, tex1Dfetch_v(idx_mi), v_idx)); float v_i_jph_k = .5f * (v_idx + tex1Dfetch_v(idx_pj)); float v_i_jmh_k = .5f * (v_idx + tex1Dfetch_v(idx_mj)); float dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , tex1Dfetch_v(idx_pj))) - (v_i_jmh_k * interp(v_i_jmh_k, tex1Dfetch_v(idx_mj), v_idx )); float w_atv_i_j_kph = .5f* (tex1Dfetch_w(idx_pk) + 
tex1Dfetch_w(idx_pk - ystride)); float w_atv_i_j_kmh = .5f* (w_idx + tex1Dfetch_w(idx_mj)); float dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , tex1Dfetch_v(idx_pk))) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, tex1Dfetch_v(idx_mk), v_idx)); dvdt[idx_no_offset] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- float u_atw_iph_j_k = .5f* (tex1Dfetch_u(idx_pi) + tex1Dfetch_u(idx_pi - 1)); float u_atw_imh_j_k = .5f* (u_idx + tex1Dfetch_u(idx_mk)); float duw = (u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , tex1Dfetch_w(idx_pi))) - (u_atw_imh_j_k * interp(u_atw_imh_j_k, tex1Dfetch_w(idx_mi), w_idx)); float v_atw_i_jph_k = .5f* (tex1Dfetch_v(idx_pj) + tex1Dfetch_v(idx_pj - 1)); float v_atw_i_jmh_k = .5f* (v_idx + tex1Dfetch_v(idx_mk)); float dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , tex1Dfetch_w(idx_pj))) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, tex1Dfetch_w(idx_mj), w_idx)); float w_i_j_kph = .5f * (w_idx + tex1Dfetch_w(idx_pk)); float w_i_j_kmh = .5f * (w_idx + tex1Dfetch_w(idx_mk)); float dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , tex1Dfetch_w(idx_pk))) - (w_i_j_kmh * interp(w_i_j_kmh, tex1Dfetch_w(idx_mk), w_idx )); dwdt[idx_no_offset] = -duw*invhx - dvw*invhy - dww*invhz; } } #ifdef OCU_DOUBLESUPPORT texture<int2, 1, hipReadModeElementType> dtex_u; texture<int2, 1, hipReadModeElementType> dtex_v; texture<int2, 1, hipReadModeElementType> dtex_w; __inline__ __device__ double tex1Dfetchd_u(const int& i) { int2 v = tex1Dfetch(dtex_u, i); return __hiloint2double(v.y, v.x); } __inline__ __device__ double tex1Dfetchd_v(const int& i) { int2 v = tex1Dfetch(dtex_v, i); return __hiloint2double(v.y, v.x); } __inline__ __device__ double tex1Dfetchd_w(const int& i) { int2 v = tex1Dfetch(dtex_w, i); return __hiloint2double(v.y, v.x); } template<typename INTERP> __global__ void Advection3DD_apply_upwind_TEX( double *dudt, double *dvdt, double *dwdt, double invhx, double invhy, double invhz, int xstride, int ystride, int tex_offset, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; // shift so we will get maximum coalescing. This means that we will need to test if k>0 below. 
if (i < nx && j < ny && k < nz) { // calc phi indexing int idx_no_offset = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx = idx_no_offset + tex_offset; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; double u_idx = tex1Dfetchd_u(idx); double v_idx = tex1Dfetchd_v(idx); double w_idx = tex1Dfetchd_w(idx); //---- dudt ---- double u_iph_j_k = .5 * (u_idx + tex1Dfetchd_u(idx_pi)); double u_imh_j_k = .5 * (u_idx + tex1Dfetchd_u(idx_mi)); double duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , tex1Dfetchd_u(idx_pi))) - (u_imh_j_k * interp(u_imh_j_k, tex1Dfetchd_u(idx_mi), u_idx )); double v_atu_i_jph_k = .5* (tex1Dfetchd_v(idx_pj) + tex1Dfetchd_v(idx_pj - xstride)); double v_atu_i_jmh_k = .5* (v_idx + tex1Dfetchd_v(idx_mi)); double dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , tex1Dfetchd_u(idx_pj))) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, tex1Dfetchd_u(idx_mj), u_idx)); double w_atu_i_j_kph = .5* (tex1Dfetchd_w(idx_pk) + tex1Dfetchd_w(idx_pk - xstride)); double w_atu_i_j_kmh = .5* (w_idx + tex1Dfetchd_w(idx_mi)); double dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , tex1Dfetchd_u(idx_pk))) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, tex1Dfetchd_u(idx_mk), u_idx)); dudt[idx_no_offset] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- double u_atv_iph_j_k = .5* (tex1Dfetchd_u(idx_pi) + tex1Dfetchd_u(idx_pi - ystride)); double u_atv_imh_j_k = .5* (u_idx + tex1Dfetchd_u(idx_mj)); double duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , tex1Dfetchd_v(idx_pi))) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, tex1Dfetchd_v(idx_mi), v_idx)); double v_i_jph_k = .5 * (v_idx + tex1Dfetchd_v(idx_pj)); double v_i_jmh_k = .5 * (v_idx + tex1Dfetchd_v(idx_mj)); double dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , tex1Dfetchd_v(idx_pj))) - (v_i_jmh_k * interp(v_i_jmh_k, tex1Dfetchd_v(idx_mj), v_idx )); double w_atv_i_j_kph = .5* (tex1Dfetchd_w(idx_pk) + tex1Dfetchd_w(idx_pk - ystride)); double w_atv_i_j_kmh = .5* (w_idx + tex1Dfetchd_w(idx_mj)); double dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , tex1Dfetchd_v(idx_pk))) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, tex1Dfetchd_v(idx_mk), v_idx)); dvdt[idx_no_offset] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- double u_atw_iph_j_k = .5* (tex1Dfetchd_u(idx_pi) + tex1Dfetchd_u(idx_pi - 1)); double u_atw_imh_j_k = .5* (u_idx + tex1Dfetchd_u(idx_mk)); double duw = (u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , tex1Dfetchd_w(idx_pi))) - (u_atw_imh_j_k * interp(u_atw_imh_j_k, tex1Dfetchd_w(idx_mi), w_idx)); double v_atw_i_jph_k = .5* (tex1Dfetchd_v(idx_pj) + tex1Dfetchd_v(idx_pj - 1)); double v_atw_i_jmh_k = .5* (v_idx + tex1Dfetchd_v(idx_mk)); double dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , tex1Dfetchd_w(idx_pj))) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, tex1Dfetchd_w(idx_mj), w_idx)); double w_i_j_kph = .5 * (w_idx + tex1Dfetchd_w(idx_pk)); double w_i_j_kmh = .5 * (w_idx + tex1Dfetchd_w(idx_mk)); double dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , tex1Dfetchd_w(idx_pk))) - (w_i_j_kmh * interp(w_i_j_kmh, tex1Dfetchd_w(idx_mk), w_idx )); dwdt[idx_no_offset] = -duw*invhx - dvw*invhy - dww*invhz; } } #endif // OCU_DOUBLESUPPORT namespace ocu { template<typename T> Sol_SelfAdvection3DDevice<T>::Sol_SelfAdvection3DDevice() { _nx = _ny = _nz = 0; u = 0; v = 0; w = 0; interp_type = IT_FIRST_ORDER_UPWIND; } template<typename T> Sol_SelfAdvection3DDevice<T>::~Sol_SelfAdvection3DDevice() { unbind_textures(); } template<typename T> 
bool Sol_SelfAdvection3DDevice<T>::solve_naive() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 2; int threadsInZ = 2; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { hipLaunchKernelGGL(( Sol_SelfAdvection3D_apply_upwind), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), &u->at(0,0,0),&v->at(0,0,0),&w->at(0,0,0),&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (T)(1/_hx), (T)(1/_hy), (T)(1/_hz), u->xstride(), u->ystride(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<T>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { hipLaunchKernelGGL(( Sol_SelfAdvection3D_apply_upwind), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), &u->at(0,0,0),&v->at(0,0,0),&w->at(0,0,0),&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (T)(1/_hx), (T)(1/_hy), (T)(1/_hz), u->xstride(), u->ystride(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorSecondOrderCentered<T>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_naive - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Sol_SelfAdvection3D_apply_upwind", Dg, Db); } template<> bool Sol_SelfAdvection3DDevice<float>::bind_textures() { hipChannelFormatDesc channelDesc_u = hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDesc_v = hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDesc_w = hipCreateChannelDesc<float>(); // set up texture tex_u.filterMode = hipFilterModePoint; tex_u.normalized = false; tex_u.channelDesc = channelDesc_u; tex_v.filterMode = hipFilterModePoint; tex_v.normalized = false; tex_v.channelDesc = channelDesc_v; tex_w.filterMode = hipFilterModePoint; tex_w.normalized = false; tex_w.channelDesc = channelDesc_w; if (hipBindTexture(NULL, &tex_u, u->buffer(), &channelDesc_u, u->num_allocated_elements() * sizeof(float)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture u\n"); return false; } if (hipBindTexture(NULL, &tex_v, v->buffer(), &channelDesc_v, v->num_allocated_elements() * sizeof(float)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture v\n"); return false; } if (hipBindTexture(NULL, &tex_w, w->buffer(), &channelDesc_w, w->num_allocated_elements() * sizeof(float)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture w\n"); return false; } return true; } template<> bool Sol_SelfAdvection3DDevice<float>::unbind_textures() { hipUnbindTexture(&tex_u); hipUnbindTexture(&tex_v); hipUnbindTexture(&tex_w); return true; } template<> bool Sol_SelfAdvection3DDevice<float>::solve_tex() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 2; int threadsInZ = 2; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { hipLaunchKernelGGL(( Advection3DF_apply_upwind_TEX), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), 
&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (float)(1/_hx), (float)(1/_hy), (float)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<float>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { hipLaunchKernelGGL(( Advection3DF_apply_upwind_TEX), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), &deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (float)(1/_hx), (float)(1/_hy), (float)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorSecondOrderCentered<float>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_tex - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Advection3DF_apply_upwind_TEX", Dg, Db); } #ifdef OCU_DOUBLESUPPORT template<> bool Sol_SelfAdvection3DDevice<double>::bind_textures() { hipChannelFormatDesc channelDesc_u = hipCreateChannelDesc<int2>(); hipChannelFormatDesc channelDesc_v = hipCreateChannelDesc<int2>(); hipChannelFormatDesc channelDesc_w = hipCreateChannelDesc<int2>(); // set up texture dtex_u.filterMode = hipFilterModePoint; dtex_u.normalized = false; dtex_u.channelDesc = channelDesc_u; dtex_v.filterMode = hipFilterModePoint; dtex_v.normalized = false; dtex_v.channelDesc = channelDesc_v; dtex_w.filterMode = hipFilterModePoint; dtex_w.normalized = false; dtex_w.channelDesc = channelDesc_w; if (hipBindTexture(NULL, &dtex_u, u->buffer(), &channelDesc_u, u->num_allocated_elements() * sizeof(double)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture u\n"); return false; } if (hipBindTexture(NULL, &dtex_v, v->buffer(), &channelDesc_v, v->num_allocated_elements() * sizeof(double)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture v\n"); return false; } if (hipBindTexture(NULL, &dtex_w, w->buffer(), &channelDesc_w, w->num_allocated_elements() * sizeof(double)) != (unsigned int) hipSuccess) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture w\n"); return false; } return true; } template<> bool Sol_SelfAdvection3DDevice<double>::unbind_textures() { hipUnbindTexture(&dtex_u); hipUnbindTexture(&dtex_v); hipUnbindTexture(&dtex_w); return true; } template<> bool Sol_SelfAdvection3DDevice<double>::solve_tex() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 4; int threadsInZ = 4; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { hipLaunchKernelGGL(( Advection3DD_apply_upwind_TEX), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), &deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (double)(1/_hx), (double)(1/_hy), (double)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<double>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { hipLaunchKernelGGL(( Advection3DD_apply_upwind_TEX), dim3(Dg), dim3(Db), 0, ThreadManager::get_compute_stream(), &deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (double)(1/_hx), (double)(1/_hy), 
(double)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorSecondOrderCentered<double>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_tex - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Advection3DD_apply_upwind_TEX", Dg, Db); } #endif // OCU_DOUBLESUPPORT template<typename T> bool Sol_SelfAdvection3DDevice<T>::solve() { //return solve_naive(); return solve_tex(); } template<typename T> bool Sol_SelfAdvection3DDevice<T>::initialize_storage(int nx, int ny, int nz, double hx, double hy, double hz, Grid3DDevice<T> *u_val, Grid3DDevice<T> *v_val, Grid3DDevice<T> *w_val, Grid3DDevice<T> *deriv_udt_val, Grid3DDevice<T> *deriv_vdt_val, Grid3DDevice<T> *deriv_wdt_val) { // u,v,w must be the proper dimensions, i.e. staggered grid if (u_val->nx() != nx+1 || u_val->ny() != ny || u_val->nz() != nz) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u dimensions mismatch\n"); return false; } if (v_val->nx() != nx || v_val->ny() != ny+1 || v_val->nz() != nz) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v dimensions mismatch\n"); return false; } if (w_val->nx() != nx || w_val->ny() != ny || w_val->nz() != nz+1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v dimensions mismatch\n"); return false; } // u,v,w must all share the same memory layout. This is a cuda optimization to simplify indexing. if (!u_val->check_layout_match(*v_val) || !u_val->check_layout_match(*w_val)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u,v,w layout mismatch\n"); return false; } if (u_val->gx() < 1 || u_val->gy() < 1 || u_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u has no ghost cells \n"); return false; } if (v_val->gx() < 1 || v_val->gy() < 1 || v_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v has no ghost cells \n"); return false; } if (w_val->gx() < 1 || w_val->gy() < 1 || w_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - w has no ghost cells \n"); return false; } u = u_val; v = v_val; w = w_val; deriv_udt = deriv_udt_val; deriv_vdt = deriv_vdt_val; deriv_wdt = deriv_wdt_val; if (!check_float(hx) || !check_float(hy) || !check_float(hz)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - garbage hx,hy,hz value\n"); return false; } _hx = hx; _hy = hy; _hz = hz; _nx = nx; _ny = ny; _nz = nz; if (!u->check_layout_match(*deriv_udt) || !v->check_layout_match(*deriv_vdt) || !w->check_layout_match(*deriv_wdt)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - derivative layout error\n"); return false; } if (!bind_textures()) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - failed on texture binding\n"); return false; } return true; } template class Sol_SelfAdvection3DDevice<float>; #ifdef OCU_DOUBLESUPPORT template class Sol_SelfAdvection3DDevice<double>; #endif //OCU_DOUBLESUPPORT } // end namespace
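// A minimal sketch (not part of the original file) of the launch convention
// used by solve_naive/solve_tex above: the k (fastest-changing) direction is
// indexed by blockIdx.x for coalescing, so the y and z block indices are folded
// into gridDim.y and recovered in the kernel from blocksInY and its reciprocal.
// Kernel and function names here (demo_unfold_zy, demo_launch_zy) are
// illustrative only.
__global__ void demo_unfold_zy(int blocksInY, float invBlocksInY) {
  int blockIdxz = truncf(blockIdx.y * invBlocksInY);        // which z-slab of blocks
  int blockIdxy = blockIdx.y - __mul24(blockIdxz, blocksInY);
  int k = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;    // fastest-changing index
  int j = __mul24(blockIdxy , blockDim.y) + threadIdx.y;
  int i = __mul24(blockIdxz , blockDim.z) + threadIdx.z;
  (void)i; (void)j; (void)k;                                // a real kernel bounds-checks i, j, k
}

void demo_launch_zy(int nx, int ny, int nz) {
  int threadsInX = 16, threadsInY = 2, threadsInZ = 2;      // same block shape as solve_naive
  int blocksInX = (nz + threadsInX - 1) / threadsInX;       // k spans nz (transposed for coalescing)
  int blocksInY = (ny + threadsInY - 1) / threadsInY;
  int blocksInZ = (nx + threadsInZ - 1) / threadsInZ;
  dim3 Dg(blocksInX, blocksInY * blocksInZ);                // fold z into the y grid dimension
  dim3 Db(threadsInX, threadsInY, threadsInZ);
  hipLaunchKernelGGL((demo_unfold_zy), dim3(Dg), dim3(Db), 0, 0,
                     blocksInY, 1.0f / (float)blocksInY);
}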
4c42ac254ede5b54acc26f19178e814d8b6f0545.cu
/* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <cuda.h> #include "ocuutil/float_routines.h" #include "ocuutil/thread.h" #include "ocustorage/grid3dops.h" #include "ocuequation/sol_selfadvection3d.h" //! This routine works because u,v,w,phi, and dphidt must all be padded so that they have the same memory layout, //! even though they have different dimensions. Then we can calculate indexing math once, and reuse it for //! all of the grids. template<typename T, typename INTERP> __global__ void Sol_SelfAdvection3D_apply_upwind( T *u, T *v, T *w, T *dudt, T *dvdt, T *dwdt, T invhx, T invhy, T invhz, int xstride, int ystride, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; if (i < nx && j < ny && k < nz) { // calc phi indexing int idx = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; T u_idx = u[idx]; T v_idx = v[idx]; T w_idx = w[idx]; //---- dudt ---- T u_iph_j_k = .5 * (u_idx + u[idx_pi]); T u_imh_j_k = .5 * (u_idx + u[idx_mi]); T duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , u[idx_pi])) - (u_imh_j_k * interp(u_imh_j_k, u[idx_mi], u_idx )); T v_atu_i_jph_k = .5* (v[idx_pj] + v[idx_pj - xstride]); T v_atu_i_jmh_k = .5* (v_idx + v[idx_mi]); T dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , u[idx_pj])) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, u[idx_mj], u_idx)); T w_atu_i_j_kph = .5* (w[idx_pk] + w[idx_pk - xstride]); T w_atu_i_j_kmh = .5* (w_idx + w[idx_mi]); T dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , u[idx_pk])) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, u[idx_mk], u_idx)); dudt[idx] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- T u_atv_iph_j_k = .5* (u[idx_pi] + u[idx_pi - ystride]); T u_atv_imh_j_k = .5* (u_idx + u[idx_mj]); T duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , v[idx_pi])) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, v[idx_mi], v_idx)); T v_i_jph_k = .5 * (v_idx + v[idx_pj]); T v_i_jmh_k = .5 * (v_idx + v[idx_mj]); T dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , v[idx_pj])) - (v_i_jmh_k * interp(v_i_jmh_k, v[idx_mj], v_idx)); T w_atv_i_j_kph = .5* (w[idx_pk] + w[idx_pk - ystride]); T w_atv_i_j_kmh = .5* (w_idx + w[idx_mj]); T dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , v[idx_pk])) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, v[idx_mk], v_idx)); dvdt[idx] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- T u_atw_iph_j_k = .5* (u[idx_pi] + u[idx_pi - 1]); T u_atw_imh_j_k = .5* (u_idx + u[idx_mk]); T duw = (u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , w[idx_pi])) - (u_atw_imh_j_k 
* interp(u_atw_imh_j_k, w[idx_mi], w_idx)); T v_atw_i_jph_k = .5* (v[idx_pj] + v[idx_pj - 1]); T v_atw_i_jmh_k = .5* (v_idx + v[idx_mk]); T dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , w[idx_pj])) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, w[idx_mj], w_idx)); T w_i_j_kph = .5 * (w_idx + w[idx_pk]); T w_i_j_kmh = .5 * (w_idx + w[idx_mk]); T dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , w[idx_pk])) - (w_i_j_kmh * interp(w_i_j_kmh, w[idx_mk], w_idx)); dwdt[idx] = -duw*invhx - dvw*invhy - dww*invhz; } } texture<float, 1, cudaReadModeElementType> tex_u; texture<float, 1, cudaReadModeElementType> tex_v; texture<float, 1, cudaReadModeElementType> tex_w; __inline__ __device__ float tex1Dfetch_u(const int& i) { return tex1Dfetch(tex_u, i); } __inline__ __device__ float tex1Dfetch_v(const int& i) { return tex1Dfetch(tex_v, i); } __inline__ __device__ float tex1Dfetch_w(const int& i) { return tex1Dfetch(tex_w, i); } template<typename INTERP> __global__ void Advection3DF_apply_upwind_TEX( float *dudt, float *dvdt, float *dwdt, float invhx, float invhy, float invhz, int xstride, int ystride, int tex_offset, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; if (i < nx && j < ny && k < nz) { // calc phi indexing int idx_no_offset = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx = idx_no_offset + tex_offset; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; float u_idx = tex1Dfetch_u(idx); float v_idx = tex1Dfetch_v(idx); float w_idx = tex1Dfetch_w(idx); //---- dudt ---- float u_iph_j_k = .5f * (u_idx + tex1Dfetch_u(idx_pi)); float u_imh_j_k = .5f * (u_idx + tex1Dfetch_u(idx_mi)); float duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , tex1Dfetch_u(idx_pi))) - (u_imh_j_k * interp(u_imh_j_k, tex1Dfetch_u(idx_mi) , u_idx )); float v_atu_i_jph_k = .5f* (tex1Dfetch_v(idx_pj) + tex1Dfetch_v(idx_pj - xstride)); float v_atu_i_jmh_k = .5f* (v_idx + tex1Dfetch_v(idx_mi)); float dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , tex1Dfetch_u(idx_pj))) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, tex1Dfetch_u(idx_mj), u_idx)); float w_atu_i_j_kph = .5f* (tex1Dfetch_w(idx_pk) + tex1Dfetch_w(idx_pk - xstride)); float w_atu_i_j_kmh = .5f* (w_idx + tex1Dfetch_w(idx_mi)); float dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , tex1Dfetch_u(idx_pk))) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, tex1Dfetch_u(idx_mk), u_idx)); dudt[idx_no_offset] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- float u_atv_iph_j_k = .5f* (tex1Dfetch_u(idx_pi) + tex1Dfetch_u(idx_pi - ystride)); float u_atv_imh_j_k = .5f* (u_idx + tex1Dfetch_u(idx_mj)); float duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , tex1Dfetch_v(idx_pi))) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, tex1Dfetch_v(idx_mi), v_idx)); float v_i_jph_k = .5f * (v_idx + tex1Dfetch_v(idx_pj)); float v_i_jmh_k = .5f * (v_idx + tex1Dfetch_v(idx_mj)); float dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , tex1Dfetch_v(idx_pj))) - (v_i_jmh_k * interp(v_i_jmh_k, tex1Dfetch_v(idx_mj), v_idx )); float w_atv_i_j_kph = .5f* (tex1Dfetch_w(idx_pk) + tex1Dfetch_w(idx_pk - ystride)); float w_atv_i_j_kmh = .5f* (w_idx + 
tex1Dfetch_w(idx_mj)); float dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , tex1Dfetch_v(idx_pk))) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, tex1Dfetch_v(idx_mk), v_idx)); dvdt[idx_no_offset] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- float u_atw_iph_j_k = .5f* (tex1Dfetch_u(idx_pi) + tex1Dfetch_u(idx_pi - 1)); float u_atw_imh_j_k = .5f* (u_idx + tex1Dfetch_u(idx_mk)); float duw = (u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , tex1Dfetch_w(idx_pi))) - (u_atw_imh_j_k * interp(u_atw_imh_j_k, tex1Dfetch_w(idx_mi), w_idx)); float v_atw_i_jph_k = .5f* (tex1Dfetch_v(idx_pj) + tex1Dfetch_v(idx_pj - 1)); float v_atw_i_jmh_k = .5f* (v_idx + tex1Dfetch_v(idx_mk)); float dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , tex1Dfetch_w(idx_pj))) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, tex1Dfetch_w(idx_mj), w_idx)); float w_i_j_kph = .5f * (w_idx + tex1Dfetch_w(idx_pk)); float w_i_j_kmh = .5f * (w_idx + tex1Dfetch_w(idx_mk)); float dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , tex1Dfetch_w(idx_pk))) - (w_i_j_kmh * interp(w_i_j_kmh, tex1Dfetch_w(idx_mk), w_idx )); dwdt[idx_no_offset] = -duw*invhx - dvw*invhy - dww*invhz; } } #ifdef OCU_DOUBLESUPPORT texture<int2, 1, cudaReadModeElementType> dtex_u; texture<int2, 1, cudaReadModeElementType> dtex_v; texture<int2, 1, cudaReadModeElementType> dtex_w; __inline__ __device__ double tex1Dfetchd_u(const int& i) { int2 v = tex1Dfetch(dtex_u, i); return __hiloint2double(v.y, v.x); } __inline__ __device__ double tex1Dfetchd_v(const int& i) { int2 v = tex1Dfetch(dtex_v, i); return __hiloint2double(v.y, v.x); } __inline__ __device__ double tex1Dfetchd_w(const int& i) { int2 v = tex1Dfetch(dtex_w, i); return __hiloint2double(v.y, v.x); } template<typename INTERP> __global__ void Advection3DD_apply_upwind_TEX( double *dudt, double *dvdt, double *dwdt, double invhx, double invhy, double invhz, int xstride, int ystride, int tex_offset, int nx, int ny, int nz, int blocksInY, float invBlocksInY, INTERP interp) { int blockIdxz = truncf(blockIdx.y * invBlocksInY); int blockIdxy = blockIdx.y - __mul24(blockIdxz,blocksInY); // transpose for coalescing since k is the fastest changing index int k = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int j = __mul24(blockIdxy ,blockDim.y) + threadIdx.y; int i = __mul24(blockIdxz ,blockDim.z) + threadIdx.z; // shift so we will get maximum coalescing. This means that we will need to test if k>0 below. 
if (i < nx && j < ny && k < nz) { // calc phi indexing int idx_no_offset = __mul24(i, xstride) + __mul24(j,ystride) + k; int idx = idx_no_offset + tex_offset; int idx_pi = idx + xstride; int idx_pj = idx + ystride; int idx_pk = idx + 1; int idx_mi = idx - xstride; int idx_mj = idx - ystride; int idx_mk = idx - 1; double u_idx = tex1Dfetchd_u(idx); double v_idx = tex1Dfetchd_v(idx); double w_idx = tex1Dfetchd_w(idx); //---- dudt ---- double u_iph_j_k = .5 * (u_idx + tex1Dfetchd_u(idx_pi)); double u_imh_j_k = .5 * (u_idx + tex1Dfetchd_u(idx_mi)); double duu = (u_iph_j_k * interp(u_iph_j_k, u_idx , tex1Dfetchd_u(idx_pi))) - (u_imh_j_k * interp(u_imh_j_k, tex1Dfetchd_u(idx_mi), u_idx )); double v_atu_i_jph_k = .5* (tex1Dfetchd_v(idx_pj) + tex1Dfetchd_v(idx_pj - xstride)); double v_atu_i_jmh_k = .5* (v_idx + tex1Dfetchd_v(idx_mi)); double dvu = (v_atu_i_jph_k * interp(v_atu_i_jph_k, u_idx , tex1Dfetchd_u(idx_pj))) - (v_atu_i_jmh_k * interp(v_atu_i_jmh_k, tex1Dfetchd_u(idx_mj), u_idx)); double w_atu_i_j_kph = .5* (tex1Dfetchd_w(idx_pk) + tex1Dfetchd_w(idx_pk - xstride)); double w_atu_i_j_kmh = .5* (w_idx + tex1Dfetchd_w(idx_mi)); double dwu = (w_atu_i_j_kph * interp(w_atu_i_j_kph, u_idx , tex1Dfetchd_u(idx_pk))) - (w_atu_i_j_kmh * interp(w_atu_i_j_kmh, tex1Dfetchd_u(idx_mk), u_idx)); dudt[idx_no_offset] = -duu*invhx - dvu*invhy - dwu*invhz; //---- dvdt ---- double u_atv_iph_j_k = .5* (tex1Dfetchd_u(idx_pi) + tex1Dfetchd_u(idx_pi - ystride)); double u_atv_imh_j_k = .5* (u_idx + tex1Dfetchd_u(idx_mj)); double duv = (u_atv_iph_j_k * interp(u_atv_iph_j_k, v_idx , tex1Dfetchd_v(idx_pi))) - (u_atv_imh_j_k * interp(u_atv_imh_j_k, tex1Dfetchd_v(idx_mi), v_idx)); double v_i_jph_k = .5 * (v_idx + tex1Dfetchd_v(idx_pj)); double v_i_jmh_k = .5 * (v_idx + tex1Dfetchd_v(idx_mj)); double dvv = (v_i_jph_k * interp(v_i_jph_k, v_idx , tex1Dfetchd_v(idx_pj))) - (v_i_jmh_k * interp(v_i_jmh_k, tex1Dfetchd_v(idx_mj), v_idx )); double w_atv_i_j_kph = .5* (tex1Dfetchd_w(idx_pk) + tex1Dfetchd_w(idx_pk - ystride)); double w_atv_i_j_kmh = .5* (w_idx + tex1Dfetchd_w(idx_mj)); double dwv = (w_atv_i_j_kph * interp(w_atv_i_j_kph, v_idx , tex1Dfetchd_v(idx_pk))) - (w_atv_i_j_kmh * interp(w_atv_i_j_kmh, tex1Dfetchd_v(idx_mk), v_idx)); dvdt[idx_no_offset] = -duv*invhx - dvv*invhy - dwv*invhz; //---- dwdt ---- double u_atw_iph_j_k = .5* (tex1Dfetchd_u(idx_pi) + tex1Dfetchd_u(idx_pi - 1)); double u_atw_imh_j_k = .5* (u_idx + tex1Dfetchd_u(idx_mk)); double duw = (u_atw_iph_j_k * interp(u_atw_iph_j_k, w_idx , tex1Dfetchd_w(idx_pi))) - (u_atw_imh_j_k * interp(u_atw_imh_j_k, tex1Dfetchd_w(idx_mi), w_idx)); double v_atw_i_jph_k = .5* (tex1Dfetchd_v(idx_pj) + tex1Dfetchd_v(idx_pj - 1)); double v_atw_i_jmh_k = .5* (v_idx + tex1Dfetchd_v(idx_mk)); double dvw = (v_atw_i_jph_k * interp(v_atw_i_jph_k, w_idx , tex1Dfetchd_w(idx_pj))) - (v_atw_i_jmh_k * interp(v_atw_i_jmh_k, tex1Dfetchd_w(idx_mj), w_idx)); double w_i_j_kph = .5 * (w_idx + tex1Dfetchd_w(idx_pk)); double w_i_j_kmh = .5 * (w_idx + tex1Dfetchd_w(idx_mk)); double dww = (w_i_j_kph * interp(w_i_j_kph, w_idx , tex1Dfetchd_w(idx_pk))) - (w_i_j_kmh * interp(w_i_j_kmh, tex1Dfetchd_w(idx_mk), w_idx )); dwdt[idx_no_offset] = -duw*invhx - dvw*invhy - dww*invhz; } } #endif // OCU_DOUBLESUPPORT namespace ocu { template<typename T> Sol_SelfAdvection3DDevice<T>::Sol_SelfAdvection3DDevice() { _nx = _ny = _nz = 0; u = 0; v = 0; w = 0; interp_type = IT_FIRST_ORDER_UPWIND; } template<typename T> Sol_SelfAdvection3DDevice<T>::~Sol_SelfAdvection3DDevice() { unbind_textures(); } template<typename T> 
bool Sol_SelfAdvection3DDevice<T>::solve_naive() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 2; int threadsInZ = 2; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { Sol_SelfAdvection3D_apply_upwind<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&u->at(0,0,0),&v->at(0,0,0),&w->at(0,0,0),&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (T)(1/_hx), (T)(1/_hy), (T)(1/_hz), u->xstride(), u->ystride(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<T>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { Sol_SelfAdvection3D_apply_upwind<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&u->at(0,0,0),&v->at(0,0,0),&w->at(0,0,0),&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (T)(1/_hx), (T)(1/_hy), (T)(1/_hz), u->xstride(), u->ystride(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorSecondOrderCentered<T>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_naive - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Sol_SelfAdvection3D_apply_upwind", Dg, Db); } template<> bool Sol_SelfAdvection3DDevice<float>::bind_textures() { cudaChannelFormatDesc channelDesc_u = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDesc_v = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDesc_w = cudaCreateChannelDesc<float>(); // set up texture tex_u.filterMode = cudaFilterModePoint; tex_u.normalized = false; tex_u.channelDesc = channelDesc_u; tex_v.filterMode = cudaFilterModePoint; tex_v.normalized = false; tex_v.channelDesc = channelDesc_v; tex_w.filterMode = cudaFilterModePoint; tex_w.normalized = false; tex_w.channelDesc = channelDesc_w; if (cudaBindTexture(NULL, &tex_u, u->buffer(), &channelDesc_u, u->num_allocated_elements() * sizeof(float)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture u\n"); return false; } if (cudaBindTexture(NULL, &tex_v, v->buffer(), &channelDesc_v, v->num_allocated_elements() * sizeof(float)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture v\n"); return false; } if (cudaBindTexture(NULL, &tex_w, w->buffer(), &channelDesc_w, w->num_allocated_elements() * sizeof(float)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<float>::solve_tex - Could not bind texture w\n"); return false; } return true; } template<> bool Sol_SelfAdvection3DDevice<float>::unbind_textures() { cudaUnbindTexture(&tex_u); cudaUnbindTexture(&tex_v); cudaUnbindTexture(&tex_w); return true; } template<> bool Sol_SelfAdvection3DDevice<float>::solve_tex() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 2; int threadsInZ = 2; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { Advection3DF_apply_upwind_TEX<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), 
(float)(1/_hx), (float)(1/_hy), (float)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<float>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { Advection3DF_apply_upwind_TEX<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (float)(1/_hx), (float)(1/_hy), (float)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorSecondOrderCentered<float>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_tex - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Advection3DF_apply_upwind_TEX", Dg, Db); } #ifdef OCU_DOUBLESUPPORT template<> bool Sol_SelfAdvection3DDevice<double>::bind_textures() { cudaChannelFormatDesc channelDesc_u = cudaCreateChannelDesc<int2>(); cudaChannelFormatDesc channelDesc_v = cudaCreateChannelDesc<int2>(); cudaChannelFormatDesc channelDesc_w = cudaCreateChannelDesc<int2>(); // set up texture dtex_u.filterMode = cudaFilterModePoint; dtex_u.normalized = false; dtex_u.channelDesc = channelDesc_u; dtex_v.filterMode = cudaFilterModePoint; dtex_v.normalized = false; dtex_v.channelDesc = channelDesc_v; dtex_w.filterMode = cudaFilterModePoint; dtex_w.normalized = false; dtex_w.channelDesc = channelDesc_w; if (cudaBindTexture(NULL, &dtex_u, u->buffer(), &channelDesc_u, u->num_allocated_elements() * sizeof(double)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture u\n"); return false; } if (cudaBindTexture(NULL, &dtex_v, v->buffer(), &channelDesc_v, v->num_allocated_elements() * sizeof(double)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture v\n"); return false; } if (cudaBindTexture(NULL, &dtex_w, w->buffer(), &channelDesc_w, w->num_allocated_elements() * sizeof(double)) != (unsigned int) CUDA_SUCCESS) { printf("[ERROR] Sol_SelfAdvection3DDevice<double>::bind_textures - Could not bind texture w\n"); return false; } return true; } template<> bool Sol_SelfAdvection3DDevice<double>::unbind_textures() { cudaUnbindTexture(&dtex_u); cudaUnbindTexture(&dtex_v); cudaUnbindTexture(&dtex_w); return true; } template<> bool Sol_SelfAdvection3DDevice<double>::solve_tex() { int tnx = _nz; int tny = _ny; int tnz = _nx; int threadsInX = 16; int threadsInY = 4; int threadsInZ = 4; int blocksInX = (tnx+threadsInX-1)/threadsInX; int blocksInY = (tny+threadsInY-1)/threadsInY; int blocksInZ = (tnz+threadsInZ-1)/threadsInZ; dim3 Dg = dim3(blocksInX, blocksInY*blocksInZ); dim3 Db = dim3(threadsInX, threadsInY, threadsInZ); PreKernel(); if (interp_type == IT_FIRST_ORDER_UPWIND) { Advection3DD_apply_upwind_TEX<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (double)(1/_hx), (double)(1/_hy), (double)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, InterpolatorFirstOrderUpwind<double>()); } else if (interp_type == IT_SECOND_ORDER_CENTERED) { Advection3DD_apply_upwind_TEX<<<Dg, Db, 0, ThreadManager::get_compute_stream()>>>(&deriv_udt->at(0,0,0),&deriv_vdt->at(0,0,0),&deriv_wdt->at(0,0,0), (double)(1/_hx), (double)(1/_hy), (double)(1/_hz), u->xstride(), u->ystride(), u->shift_amount(), _nx, _ny, _nz, blocksInY, 1.0f / (float)blocksInY, 
InterpolatorSecondOrderCentered<double>()); } else { printf("[ERROR] Sol_SelfAdvection3DDevice::solve_tex - invalid interpolation type %d\n", interp_type); return false; } return PostKernelDim("Advection3DD_apply_upwind_TEX", Dg, Db); } #endif // OCU_DOUBLESUPPORT template<typename T> bool Sol_SelfAdvection3DDevice<T>::solve() { //return solve_naive(); return solve_tex(); } template<typename T> bool Sol_SelfAdvection3DDevice<T>::initialize_storage(int nx, int ny, int nz, double hx, double hy, double hz, Grid3DDevice<T> *u_val, Grid3DDevice<T> *v_val, Grid3DDevice<T> *w_val, Grid3DDevice<T> *deriv_udt_val, Grid3DDevice<T> *deriv_vdt_val, Grid3DDevice<T> *deriv_wdt_val) { // u,v,w must be the proper dimensions, i.e. staggered grid if (u_val->nx() != nx+1 || u_val->ny() != ny || u_val->nz() != nz) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u dimensions mismatch\n"); return false; } if (v_val->nx() != nx || v_val->ny() != ny+1 || v_val->nz() != nz) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v dimensions mismatch\n"); return false; } if (w_val->nx() != nx || w_val->ny() != ny || w_val->nz() != nz+1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v dimensions mismatch\n"); return false; } // u,v,w must all share the same memory layout. This is a cuda optimization to simplify indexing. if (!u_val->check_layout_match(*v_val) || !u_val->check_layout_match(*w_val)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u,v,w layout mismatch\n"); return false; } if (u_val->gx() < 1 || u_val->gy() < 1 || u_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - u has no ghost cells \n"); return false; } if (v_val->gx() < 1 || v_val->gy() < 1 || v_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - v has no ghost cells \n"); return false; } if (w_val->gx() < 1 || w_val->gy() < 1 || w_val->gz() < 1) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - w has no ghost cells \n"); return false; } u = u_val; v = v_val; w = w_val; deriv_udt = deriv_udt_val; deriv_vdt = deriv_vdt_val; deriv_wdt = deriv_wdt_val; if (!check_float(hx) || !check_float(hy) || !check_float(hz)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - garbage hx,hy,hz value\n"); return false; } _hx = hx; _hy = hy; _hz = hz; _nx = nx; _ny = ny; _nz = nz; if (!u->check_layout_match(*deriv_udt) || !v->check_layout_match(*deriv_vdt) || !w->check_layout_match(*deriv_wdt)) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - derivative layout error\n"); return false; } if (!bind_textures()) { printf("[ERROR] Sol_SelfAdvection3DDevice::initialize_storage - failed on texture binding\n"); return false; } return true; } template class Sol_SelfAdvection3DDevice<float>; #ifdef OCU_DOUBLESUPPORT template class Sol_SelfAdvection3DDevice<double>; #endif //OCU_DOUBLESUPPORT } // end namespace
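The double-precision texture path above exists because the legacy texture-reference API has no native double fetch: each velocity buffer is bound as int2 and the two 32-bit halves are reassembled with __hiloint2double. A minimal sketch of that pattern (tex_d, fetch_double, d_buf and n are hypothetical names; the texture-reference API has since been removed from recent CUDA toolkits):

#include <cuda_runtime.h>

// Read doubles through the texture path by binding the buffer as int2
// and reassembling the 32-bit halves on the device.
texture<int2, 1, cudaReadModeElementType> tex_d;

__device__ double fetch_double(int i)
{
    int2 v = tex1Dfetch(tex_d, i);      // raw 8 bytes of the i-th double as two ints
    return __hiloint2double(v.y, v.x);  // (hi, lo) -> double
}

// Host-side binding, mirroring bind_textures() above:
//   cudaChannelFormatDesc desc = cudaCreateChannelDesc<int2>();
//   cudaBindTexture(NULL, &tex_d, d_buf, &desc, n * sizeof(double));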
b8303d42b6c09bcde279f8b6e7dd1154df137145.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef TFHE_TEST_ENVIRONMENT #include <cstdlib> #include <iostream> #include <random> #include <cassert> #include "tfhe_core.h" #include "numeric_functions.h" #include "lweparams.h" #include "lwekey.h" #include "lwesamples.h" #include "lwekeyswitch.h" #include "lwe-functions.h" #include "lwebootstrappingkey.h" #include "tfhe.h" #include <fstream> #include <cstdint> using namespace std; #define H2D hipMemcpyHostToDevice #define D2D hipMemcpyDeviceToDevice #define D2H hipMemcpyDeviceToHost #else #undef EXPORT #define EXPORT static #endif #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) //*//***************************************** // zones on the torus -> to see //*//***************************************** /* * Homomorphic bootstrapped NAND gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNAND(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) - ca - cb static const Torus32 NandConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, NandConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) + ca + cb static const Torus32 OrConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrConst, 
in_out_params); lweAddTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AND gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsAND(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweAddTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << temp_result->a[i] << " "; // } // cout << endl; //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << result->a[i] << " "; // } // cout << result->b; // cout << endl; delete_LweSample(temp_result); } /* * Homomorphic bootstrapped XOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsXOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); lweNoiselessTrivial(temp_result, XorConst, in_out_params); lweAddMulTo(temp_result, 2, ca, in_out_params); lweAddMulTo(temp_result, 2, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped XNOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsXNOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/4) + 2*(-ca-cb) static const Torus32 XnorConst = modSwitchToTorus32(-1, 4); lweNoiselessTrivial(temp_result, XnorConst, in_out_params); lweSubMulTo(temp_result, 2, ca, in_out_params); lweSubMulTo(temp_result, 2, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped NOT gate (doesn't need to be bootstrapped) * Takes in input 1 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNOT(LweSample *result, 
const LweSample *ca, const TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; lweNegate(result, ca, in_out_params); } /* * Homomorphic bootstrapped COPY gate (doesn't need to be bootstrapped) * Takes in input 1 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsCOPY(LweSample *result, const LweSample *ca, const TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; lweCopy(result, ca, in_out_params); } /* * Homomorphic Trivial Constant gate (doesn't need to be bootstrapped) * Takes a boolean value) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsCONSTANT(LweSample *result, int value, const TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; static const Torus32 MU = modSwitchToTorus32(1, 8); lweNoiselessTrivial(result, value ? MU : -MU, in_out_params); } /* * Homomorphic bootstrapped NOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) - ca - cb static const Torus32 NorConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, NorConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AndNY Gate: not(a) and b * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsANDNY(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) - ca + cb static const Torus32 AndNYConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndNYConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AndYN Gate: a and not(b) * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsANDYN(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca - cb static const Torus32 AndYNConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndYNConst, in_out_params); 
lweAddTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OrNY Gate: not(a) or b * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsORNY(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) - ca + cb static const Torus32 OrNYConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrNYConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OrYN Gate: a or not(b) * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsORYN(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) + ca - cb static const Torus32 OrYNConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrYNConst, in_out_params); lweAddTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped Mux(a,b,c) = a?b:c = a*b + not(a)*c * Takes in input 3 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsMUX(LweSample *result, const LweSample *a, const LweSample *b, const LweSample *c, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const LweParams *extracted_params = &bk->params->tgsw_params->tlwe_params->extracted_lweparams; LweSample *temp_result = new_LweSample(in_out_params); LweSample *temp_result1 = new_LweSample(extracted_params); LweSample *u1 = new_LweSample(extracted_params); LweSample *u2 = new_LweSample(extracted_params); //compute "AND(a,b)": (0,-1/8) + a + b static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweAddTo(temp_result, a, in_out_params); lweAddTo(temp_result, b, in_out_params); // Bootstrap without KeySwitch tfhe_bootstrap_woKS_FFT(u1, bk->bkFFT, MU, temp_result); //compute "AND(not(a),c)": (0,-1/8) - a + c lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweSubTo(temp_result, a, in_out_params); lweAddTo(temp_result, c, in_out_params); // Bootstrap without KeySwitch tfhe_bootstrap_woKS_FFT(u2, bk->bkFFT, MU, temp_result); // Add u1=u1+u2 static const Torus32 
MuxConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result1, MuxConst, extracted_params); lweAddTo(temp_result1, u1, extracted_params); lweAddTo(temp_result1, u2, extracted_params); // Key switching lweKeySwitch(result, bk->bkFFT->ks, temp_result1); delete_LweSample(u2); delete_LweSample(u1); delete_LweSample(temp_result1); delete_LweSample(temp_result); } /////new for gpu EXPORT LweSample_16* convertBitToNumberZero(int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16* temp = (LweSample_16 *)malloc(sizeof(LweSample_16)); temp->a = (int*)calloc(bitSize*polySize, sizeof(int)); temp->b = (int*)calloc(bitSize, sizeof(int)); temp->current_variance = (double*)calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 *convertBitToNumberZero_GPU(int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); hipMalloc(&(temp->a), bitSize * polySize * sizeof(int)); temp->b = (int *) calloc(bitSize, sizeof(int)); //testing start static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < bitSize; ++i) { temp->b[i] = -MU; } // testing end temp->current_variance = (double *) calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 *convertBitToNumberZero_GPU_2(int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); hipMalloc(&(temp->a), nOutputs * bitSize * polySize * sizeof(int)); temp->b = (int *) calloc(nOutputs * bitSize, sizeof(int)); temp->current_variance = (double *) calloc(nOutputs * bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 * newLweSample_16(int bitSize, const LweParams *params) { int polySize = params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); temp->a = (int *) calloc(bitSize * polySize, sizeof(int)); temp->b = (int *) calloc(bitSize, sizeof(int)); temp->current_variance = (double *) calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 * newLweSample_16_2(int nOutputs, int bitSize, const LweParams *params) { int polySize = params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); temp->a = (int *) calloc(nOutputs * bitSize * polySize, sizeof(int)); temp->b = (int *) calloc(nOutputs * bitSize, sizeof(int)); temp->current_variance = (double *) calloc(nOutputs * bitSize, sizeof(double)); return temp; } EXPORT LweSample_16* convertBitToNumber(const LweSample* input, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16* temp = (LweSample_16 *)malloc(sizeof(LweSample_16)); temp->a = (int*)malloc(sizeof(int)*bitSize*polySize); temp->b = (int*)malloc(sizeof(int)*bitSize); temp->current_variance = (double*)malloc(sizeof(double)*bitSize); for (int i = 0; i < bitSize; ++i) { for (int j = 0; j < polySize; ++j) { temp->a[i * polySize + j] = (int)input[i].a[j]; } temp->b[i] = input[i].b; temp->current_variance[i] = input[i].current_variance; } return temp; } EXPORT LweSample* convertNumberToBits(LweSample_16* number, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { LweSample *tempCiphertext = new_gate_bootstrapping_ciphertext_array(bitSize, bk->params); const int n = bk->params->in_out_params->n; for (int i = 0; i < bitSize; ++i) { int startIndex = i * n; for (int j = 0; j < n; ++j) { tempCiphertext[i].a[j] = 
number->a[startIndex + j]; } tempCiphertext[i].b = number->b[i]; tempCiphertext[i].current_variance = number->current_variance[i]; } return tempCiphertext; } EXPORT void freeLweSample_16(LweSample_16* input) { free(input->a); free(input->b); free(input->current_variance); free(input); } int* allocateAndCopyIntVectorFromHostToDevice(int *source, int len) { int *d_temp; int bytes = len * sizeof(int); hipMalloc(&d_temp, bytes); hipMemcpy(d_temp, source, bytes, hipMemcpyHostToDevice); return d_temp; } int* allocateAndCopyIntVectorFromDeviceToHost(int *d_source, int len) { int bytes = len * sizeof(int); int *temp = (int*)malloc(bytes); hipMemcpy(temp, d_source, bytes, hipMemcpyDeviceToHost); return temp; } __global__ void vecAdd(int *result, int *a, int *b, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = a[id] + b[id]; } } __global__ void vecAddMulTo(int *result, int mulVal, int *a, int *b, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = (mulVal * (a[id] + b[id])); } } void sendLweSmaple_16_a_ToGPU(LweSample_16 *sample, int bitSize, int polySize) { int *temp = sample->a; int byteLength = bitSize * polySize * sizeof(int); hipMalloc(&(sample->a), byteLength); hipMemcpy(sample->a, temp, byteLength, hipMemcpyHostToDevice); } EXPORT void bootsAND_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; int BLOCKSIZE = in_out_params->n; int gridSize = (int) ceil((float) (in_out_params->n * bitSize) / BLOCKSIZE); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize, bk); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = AndConst; } hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, in_out_params->n * bitSize); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] += (ca->b[i] + cb->b[i]); // cout << temp_result->b[i] << " "; // temp_result->current_variance[i] += (ca->current_variance[i] + cb->current_variance[i]); } //test start // cout << "Inside AND:" << endl; // int *tempaa = new int[in_out_params->n * bitSize]; //////// int *tempba = new int[in_out_params->n * bitSize]; // hipMemcpy(tempaa, temp_result->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); ////////// hipMemcpy(tempba, cb->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; //// cout << "ca: "; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; //// cout << "cb: "; //// for (int j = 0; j < 10; ++j) { //// cout << tempba[sI + j] << " "; //// } // cout << temp_result->b[i] << " "; // cout << endl; // } // cout << endl; //test end //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, 
ks_cv_gpu_extendedPtr); // assert(bitSize%2 == 0); // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, 1, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // int *temp = new int[in_out_params->n * bitSize]; // hipMemcpy(temp, result->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; //// cout << "new: "; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i]; //// cout << endl; // } hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } EXPORT void bootsXOR_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; int BLOCKSIZE = in_out_params->n; int gridSize = (int) ceil((float) (in_out_params->n * bitSize) / BLOCKSIZE); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize, bk); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = XorConst; } int mulVal = 2; hipLaunchKernelGGL(( vecAddMulTo), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, mulVal, ca->a, cb->a, in_out_params->n * bitSize); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] += (mulVal * (ca->b[i] + cb->b[i])); temp_result->current_variance[i] += ((mulVal * mulVal) * (ca->current_variance[i] + cb->current_variance[i])); } //test start // cout << "Inside xor: " << endl; // int *tempaa = new int[in_out_params->n * bitSize]; // hipMemcpy(tempaa, temp_result->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // // hipMemcpy(tempba, cb->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; // cout << "a: "; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } //// cout << temp_result->b[i] << " "; // cout << endl; // } // cout << endl; // cout << endl; //test end //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[in_out_params->n * bitSize]; // hipMemcpy(temp, result->a, in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; // cout << "new: "; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << result->b[i]; // cout << endl; // } hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void ANDXORvecMulAllto(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = (id / (n * bitSize)) + 1; destination[id] = (mulVal * (ca[id % (n * 
bitSize)] + cb[id % (n * bitSize)])); } } EXPORT void bootsANDXOR_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, bitSize, bk); //compute temp_result->a int BLOCKSIZE = in_out_params->n; int length = in_out_params->n * bitSize * nOutputs; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); // cout << "gridSize " << gridSize << endl; hipLaunchKernelGGL(( ANDXORvecMulAllto), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, in_out_params->n, bitSize, length); //compute temp_result->b for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_result->b[i + bitSize] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and temp_result->current_variance[i + bitSize] = (mulValXor * mulValXor) * (ca->current_variance[i] + cb->current_variance[i]);// for xor } /*//test start // cout << "Inside AND:" << endl; // int *tempaa = new int[in_out_params->n * bitSize * nOutputs]; // hipMemcpy(tempaa, temp_result->a, nOutputs * in_out_params->n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "Inside XOR:" << endl; // for (int i = 0; i < bitSize; ++i) { // int sI = (bitSize + i) * in_out_params->n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl;*/ // cout << "compute temp_result->b" << endl; // cout << "total: " << nOutputs * bitSize << endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[length]; // hipMemcpy(temp, result->a, length * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < nOutputs * bitSize; ++i) { // int sI = i * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i] << " " << result->current_variance[i] << endl; // } // cout << endl; // cout << "I am inside the function" << endl; hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void XORXORvecMulAllto(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int 
mulVal = 2; destination[id] = (mulVal * (ca[id % (n * bitSize)] + cb[id % (n * bitSize)])); } } EXPORT void bootsXORXOR_16(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 *cb2, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2, n = in_out_params->n; //compute temp_result->a int BLOCKSIZE = n; int length = n * bitSize; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); // cout << "bitSize: " << bitSize<< endl; // cout << "length: " << length << endl; // cout << "nOut: " << nOutputs << endl; // cout << "gridSize: " << gridSize << endl; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, bitSize, bk); //compute temp_result->a hipLaunchKernelGGL(( XORXORvecMulAllto), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca1->a, ca2->a, n, bitSize, length); hipLaunchKernelGGL(( XORXORvecMulAllto), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a + n, cb1->a, cb2->a, n, bitSize, length); //compute temp_result->b for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_result->b[i + bitSize] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor temp_result->current_variance[i] = (mulValXor * mulValXor) * (ca1->current_variance[i] + ca2->current_variance[i]); //for and temp_result->current_variance[i + bitSize] = (mulValXor * mulValXor) * (cb1->current_variance[i] + cb2->current_variance[i]);// for xor } // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void XORXORvecMulAllto_vector(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = 2; destination[id] = (mulVal * (ca[id] + cb[id])); } } EXPORT void bootsXORXOR_16_vector(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 *cb2, int vLength, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2, n = in_out_params->n; int 
totalBitSize = vLength * bitSize; //compute temp_result->a int BLOCKSIZE = n; int length = n * totalBitSize;//svLength * bitSize; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); // cout << "vLen: " << vLength << endl; // cout << "bitSize: " << bitSize<< endl; // cout << "length: " << length << endl; // cout << "nOut: " << nOutputs << endl; // cout << "gridSize: " << gridSize << endl; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs * vLength, bitSize, bk); //compute temp_result->a hipLaunchKernelGGL(( XORXORvecMulAllto_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca1->a, ca2->a, n, bitSize, length); hipLaunchKernelGGL(( XORXORvecMulAllto_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a + n * vLength, cb1->a, cb2->a, n, bitSize, length); //compute temp_result->b for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_result->b[i + totalBitSize] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor temp_result->current_variance[i] = (mulValXor * mulValXor) * (ca1->current_variance[i] + ca2->current_variance[i]); //for and temp_result->current_variance[i + totalBitSize] = (mulValXor * mulValXor) * (cb1->current_variance[i] + cb2->current_variance[i]);// for xor } // cout << "HEREZZZZZZZZ----" << endl; // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize * vLength, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs * vLength, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void ANDXORvecMulAllto_vector(int *destination, int *ca, int *cb, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = (id / (vLength * bitSize * n)) + 1; destination[id] = (mulVal * (ca[id % (vLength * bitSize * n)] + cb[id % (vLength * bitSize * n)])); } } EXPORT void bootsANDXOR_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const int n = in_out_params->n;//500 //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); int BLOCKSIZE = 1024; int length = vLength * bitSize * nOutputs * n; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); 
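// One thread per Torus32 coefficient: ANDXORvecMulAllto_vector covers all vLength*bitSize*nOutputs*n entries in one launch, scaling the first (AND) half by 1 and the second (XOR) half by 2 before the fused bootstrap below.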
hipLaunchKernelGGL(( ANDXORvecMulAllto_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_result->b[i + totalBitSize] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and temp_result->current_variance[i + totalBitSize] = (mulValXor * mulValXor) * (ca->current_variance[i] + cb->current_variance[i]);// for xor } //test start // cout << "Inside AND:" << endl; // int *tempaa = new int[n * bitSize * nOutputs * vLength]; // hipMemcpy(tempaa, temp_result->a, vLength * nOutputs * n * bitSize * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < bitSize * vLength; ++i) { // int sI = i * n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "Inside XOR:" << endl; // for (int i = 0; i < bitSize * vLength; ++i) { // int sI = (bitSize * vLength + i) * n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "HEREZZZZZZZZZZZ" <bootsAND_fullGPU_OneBit< endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, nOutputs * vLength * bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, vLength * bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // cout << "HEREZZZZZZZZZZZ--" << endl; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[length]; // hipMemcpy(temp, result->a, length * sizeof(int), hipMemcpyDeviceToHost); // cout << "AND PART" << endl; // for (int i = 0; i < 16 * bitSize; ++i) { // int sI = i * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i] << " " << result->current_variance[i] << endl; // } // cout << endl; // cout << endl << "XOR PART" << endl; // for (int i = 0; i < 16 * bitSize; ++i) { // int sIB = bitSize * vLength ; // int sI = i * 500 + bitSize * vLength * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[sIB + i] << " " << result->current_variance[sI + i] << endl; // } // cout << endl; hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } //only used for multiplication __global__ void vecAdd_MULT(int *result, int *a, int *b, int bAStart, int n, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = a[id] + b[(id % n) + bAStart]; } } EXPORT void bootsAND_MULT(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int resBitSize, int bitSize_A, int bIndex, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, 
Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) {
    assert(bitSize_A == resBitSize);
    static const Torus32 MU = modSwitchToTorus32(1, 8);
    const LweParams *in_out_params = bk->params->in_out_params;
    int n = in_out_params->n;
    int BLOCKSIZE = 1024;
    int gridSize = (int) ceil((float) (in_out_params->n * bitSize_A) / BLOCKSIZE);
    int bAstartIndex = bIndex * n;
    //compute: (0,-1/8) + ca + cb
    static const Torus32 AndConst = modSwitchToTorus32(-1, 8);
    LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize_A, bk);
    for (int i = 0; i < bitSize_A; ++i) {
        temp_result->b[i] = AndConst;
    }
    hipLaunchKernelGGL(( vecAdd_MULT), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, bAstartIndex, n, n * bitSize_A);
    for (int i = 0; i < bitSize_A; ++i) {
        temp_result->b[i] += (ca->b[i] + cb->b[bIndex]);
        temp_result->current_variance[i] += (ca->current_variance[i] + cb->current_variance[bIndex]);
    }
    int bitSize = bitSize_A;
    tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce,
                          ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu,
                          ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr);
    // quick-and-dirty patch; find out later on
    // hipMemset(result->a + (n * bitSize_A), 0, n * (resBitSize - bitSize_A) * sizeof(int));
    // for (int i = bitSize_A; i < resBitSize; ++i) {
    //     cout << result->b[i] << " ";
    // }
    // cout << endl;
    hipFree(temp_result->a);
    temp_result->a = NULL;
    freeLweSample_16(temp_result);
}

EXPORT void bootsAND_MULT_con(LweSample_16 *result, LweSample_16 **ca, LweSample_16 **cb, int nConMul,
                              int resBitSize, int bitSize_A, int bIndex,
                              const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT,
                              hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu,
                              Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu,
                              Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr,
                              double *ks_cv_gpu_extendedPtr) {
    assert(bitSize_A == resBitSize);
    static const Torus32 MU = modSwitchToTorus32(1, 8);
    const LweParams *in_out_params = bk->params->in_out_params;
    int n = in_out_params->n;
    int BLOCKSIZE = n;
    int gridSize = (int) ceil((float) (n * bitSize_A) / BLOCKSIZE);
    int bAstartIndex = bIndex * n;
    //compute: (0,-1/8) + ca + cb
    static const Torus32 AndConst = modSwitchToTorus32(-1, 8);
    LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize_A * nConMul, bk);
    // for (int i = 0; i < bitSize_A; ++i) {
    //     temp_result->b[i] = AndConst;
    // }
    for (int i = 0; i < nConMul; ++i) {
        hipLaunchKernelGGL(( vecAdd_MULT), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a + i * bitSize_A * n, ca[i]->a, cb[i]->a, bAstartIndex, n, n * bitSize_A);
    }
    for (int j = 0; j < nConMul; ++j) {
        int sI = j * bitSize_A;
        for (int i = 0; i < bitSize_A; ++i) {
            int sI2 = sI + i;
            temp_result->b[sI2] = (ca[j]->b[i] + cb[j]->b[bIndex]) + AndConst;
            temp_result->current_variance[sI2] = (ca[j]->current_variance[i] + cb[j]->current_variance[bIndex]);
        }
    }
    int totalBitSize = bitSize_A * nConMul;
    // cout << "totalBitSize:" << totalBitSize << endl;
    // int nOutputs = 2;
    // int vLength = nConMul/2;
    // int bitSize = bitSize_A;
    tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, totalBitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce,
                          ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu,
                          ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr);
    // cout << "bootsAND_MULT_con: bitSize: " << bitSize << " vLen: " << vLength << endl;
    // if (nConMul % 2 == 1) {
    //     cout << "ERROR: Provide even number of vector" << endl;
    //     exit(1);
    // }
    // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, 
vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } //(a xor b) and c EXPORT void bootsXOR_AND(LweSample *result, const LweSample *ca, const LweSample *cb, const LweSample *cc, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); LweSample *temp_result1 = new_LweSample(in_out_params); LweSample *temp_result2 = new_LweSample(in_out_params); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, XorConst, in_out_params); lweAddMulTo(temp_result, 2, ca, in_out_params); lweAddMulTo(temp_result, 2, cb, in_out_params); tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); lweNoiselessTrivial(temp_result2, AndConst, in_out_params); lweAddTo(temp_result2, cc, in_out_params); lweAddTo(temp_result2, result, in_out_params); // static const Torus32 MU = modSwitchToTorus32(1, 8); // const LweParams *in_out_params = bk->params->in_out_params; // LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca + cb // lweAddTo(temp_result, cb, in_out_params); tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result2); delete_LweSample(temp_result); } __global__ void reverseLweSample(int *dest, int *source, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { dest[id] = -source[id]; } } void bootsNOT_16(LweSample_16 *output, LweSample_16 *input, int bitSize, int params_n) { int length = bitSize * params_n, BLOCKSIZE = 1024, gridSize = (int) ceil((float) (length) / BLOCKSIZE); hipLaunchKernelGGL(( reverseLweSample), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, output->a, input->a, length); for (int i = 0; i < bitSize; ++i) { output->b[i] = -input->b[i]; output->current_variance[i] = input->current_variance[i]; } } //add vector __global__ void ANDvec_vector(int *destination, int *ca, int *cb, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { destination[id] = ca[id] + cb[id]; } } EXPORT void bootsAND_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { assert(nOutputs == 1); static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const int n = in_out_params->n;//500 //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); int BLOCKSIZE = 1024; int length = vLength * bitSize * nOutputs * n; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); hipLaunchKernelGGL(( ANDvec_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < 
totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and } // cout << "xxxxxxxxxxxxxxxxxxxx" << endl; // cout << nOutputs << endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, vLength * bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // if (vLength % 2 == 1 && vLength < 2) { //// cout << "vLen: " << vLength << " bitSize: " << bitSize << endl; // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); //// bitSize = bitSize/2; //// tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, 2, 2, bitSize/4, temp_result, cudaBkFFT, //// cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, //// ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // } else { // nOutputs = 2; // vLength = vLength / 2; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // } hipFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void SUBvec_vector(int *destination, int *ca, int *cc, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { destination[id] = cc[id] - ca[id]; } } EXPORT void bootsMUX_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const LweSample_16 *cc, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, hipfftDoubleComplex ****cudaBkFFT, hipfftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const LweParams *extracted_params = &bk->params->tgsw_params->tlwe_params->extracted_lweparams; const int n = in_out_params->n;//500 const int extracted_n = extracted_params->n;//1024 int nOutputs = 2; //for now vLength = 1 assert(vLength == 1); // cout << "n: " << n << endl; // cout << "nOutputs: " << nOutputs << endl; // cout << "vLength: " << vLength << endl; // cout << "extracted_n: " << extracted_n << endl; int ex_length = vLength * bitSize * extracted_n;//ex_length does not include nOutputs int length = vLength * bitSize * n;//length does not include nOutputs int BLOCKSIZE = 1024; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); LweSample_16 *u = newLweSample_16_2(nOutputs, vLength * bitSize, extracted_params); LweSample_16 *ex_temp_result = newLweSample_16_2(1, vLength * bitSize, extracted_params); free(u->a); free(ex_temp_result->a); hipMalloc(&(u->a), ex_length * nOutputs * sizeof(int)); hipMalloc(&(ex_temp_result->a), ex_length * sizeof(int)); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); static const Torus32 MuxConst = modSwitchToTorus32(1, 8); int gridSize = (int) ceil((float) (length) / BLOCKSIZE); hipLaunchKernelGGL(( ANDvec_vector), dim3(gridSize), 
dim3(BLOCKSIZE), 0, 0, temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); hipLaunchKernelGGL(( SUBvec_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_result->a + length, ca->a, cc->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; temp_result->b[i + totalBitSize] = - ca->b[i] + cc->b[i] + AndConst; temp_result->current_variance[i + totalBitSize] = - ca->current_variance[i] + cc->current_variance[i]; //for and } tfhe_bootstrap_woKS_FFT_16(u, bk->bkFFT, MU, vLength*nOutputs*bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce); // tfhe_bootstrap_woKS_FFT_16_2_vector(u, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce); // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, vLength * bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); gridSize = (int) ceil((float) (ex_length) / BLOCKSIZE); hipLaunchKernelGGL(( ANDvec_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, ex_temp_result->a, u->a, u->a + ex_length, vLength, bitSize, extracted_n, ex_length); for (int i = 0; i < vLength * bitSize; ++i) { ex_temp_result->b[i] = u->b[i] + u->b[i + vLength * bitSize] + MuxConst; ex_temp_result->current_variance[i] = u->current_variance[i] + u->current_variance[i + vLength * bitSize]; } // lweKeySwitch_16(result, bk->ks, u, bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); lweKeySwitch_16(result, bk->bkFFT->ks, ex_temp_result, vLength*nOutputs*bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // lweKeySwitch_16_2_vector(result, bk->bkFFT->ks, ex_temp_result, vLength, 1, bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, // ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // length = 500 * bitSize; // int *tempx = new int[length]; // hipMemcpy(tempx, result->a, length * sizeof(Torus32), hipMemcpyDeviceToHost); // for (int bI = 0; bI < bitSize; ++bI) { // int sI = bI * 500; // for (int i = 0; i < 10; ++i) { // cout << tempx[sI + i] << " "; // } // cout << endl; // cout << result->b[bI] << endl; // } // cout << endl; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // if (vLength % 2 == 1 && vLength < 2) { // cout << "Odd number in bootsAND_16_vector" << endl; // } // nOutputs = 2; // vLength = vLength/2; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // // // hipFree(temp_result->a); // temp_result->a = NULL; // freeLweSample_16(temp_result); } __device__ int modSwitchFromTorus32_GPU_device(Torus32 phase, int Msize){ uint64_t interv = ((UINT64_C(1)<<63)/Msize)*2; // width of each intervall uint64_t half_interval = interv/2; // begin of the first intervall uint64_t phase64 = (uint64_t(phase)<<32) + half_interval; //floor to the 
nearest multiples of interv return phase64/interv; } __global__ void bootstrappingUptoBlindRotate_OneBit(int *accum_a_b, int *temp_accum_a_b, int *bara_g, Torus32 MU, int *temp_res_a, int temp_res_b, double temp_res_cv, hipfftDoubleComplex *cudaBkFFTCoalesceExt) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < 1024) { //x is temp_res register int n = 500, N = 1024, _2N = 2048, Ns2 = 512, Nx2 = 2048; //tfhe_bootstrap_FFT_16--> u // __shared__ int u_a[1024], u_b;//N // __shared__ double u_cv; //tfhe_bootstrap_woKS_FFT_16 // __shared__ int bara[1024];//N//torusPolyTestvect_coef[1024], register int barb; bara_g[id] = 0; // torusPolyTestvect_coef[id] = MU; if (id < n) {//500 bara_g[id] = modSwitchFromTorus32_GPU_device(temp_res_a[id], Nx2); } __syncthreads(); barb = modSwitchFromTorus32_GPU_device(temp_res_b, Nx2); //tfhe_blindRotateAndExtract_FFT_16 -> here v = torusPolyTestvect_coef __shared__ int testvectbis[1024];//N //torusPolynomialMulByXai_16 -> res ->testvectbis, v-> torusPolyTestvect_coef register int a = _2N - barb; if (a < N) {//1024 if (id < a) { testvectbis[id] = -MU;//torusPolyTestvect_coef[id - a + N]; } else { testvectbis[id] = MU;//torusPolyTestvect_coef[id - a]; } } else { register int aa = a - N; if (id < aa) { testvectbis[id] = MU;//torusPolyTestvect_coef[id - aa + N]; } else { testvectbis[id] = -MU;//torusPolyTestvect_coef[id - aa]; } } __syncthreads(); accum_a_b[id] = 0;//accum_a accum_a_b[1024 + id] = testvectbis[id]; temp_accum_a_b[id] = 0;//accum_a temp_accum_a_b[1024 + id] = 0; // bara_g[id] = bara[id]; } } __global__ void prepareForiFFT_1_Bit(int *des, int *decaCoalesce, hipfftDoubleReal *d_rev_in, int *bara, int baraIndex, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int N = 1024, _2N = 2048, Ns2 = 512; register int tIndex = id % N; register int a = bara[baraIndex]; register int aa = a - N; register bool l1 = a < N, l2 = tIndex < a, l3 = tIndex < aa; int des_id = l1 * (l2 * (-source[id - a + N] - source[id]) + (!l2) * (source[id - a] - source[id])) + (!l1) * (l3 * (source[id - aa + N] - source[id]) + (!l3) * (-source[id - aa] - source[id])); register uint32_t halfBg = 512, maskMod = 1023, Bgbit = 10; // register uint32_t offset = 2149580800; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t temp1 = (((uint32_t)(des_id + 2149580800)) >> decal) & maskMod;//offset register int xxxxx1 = (temp1 - halfBg); // decaCoalesce[((id / (N)) * (N)) + id] = // (middleBlock) * xxxxx1 + (!middleBlock) * (decaCoalesce[((id / (N)) * (N)) + id]); p = 1; decal = (32 - (p + 1) * Bgbit); temp1 = (((uint32_t)(des_id + 2149580800)) >> decal) & maskMod;//offset register int xxxxx2 = temp1 - halfBg; // decaCoalesce[((id / (N)) * (N)) + id + (N)] = middleBlock * xxxxx2 + (!middleBlock) * decaCoalesce[((id / (N)) * (N)) + id + (N)]; register int bIndex = id / N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= 1) * N * 2; d_rev_in[destTod_rev_in] = xxxxx1/2.; d_rev_in[destTod_rev_in + 1024] = -xxxxx1/2.; destTod_rev_in += 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2/2.; d_rev_in[destTod_rev_in + 1024] = -xxxxx2/2.; } __global__ void prepareForFFT_1_Bit(hipfftDoubleComplex *cuDecaFFTCoalesce, hipfftDoubleComplex *tmpa_gpuCoal, hipfftDoubleComplex *d_in, hipfftDoubleComplex *d_rev_out, hipfftDoubleComplex *bki, int keyIndex, int N, int Ns2, int length) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int k = 1, kpl = 4, keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; // if(id < 512) { int 
tempId = id; int bitIndex = tempId / Ns2; register hipfftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register hipfftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register hipfftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register hipfftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex temp_a0 = cuCmul(v0, bki[aID]); hipfftDoubleComplex temp_b0 = cuCmul(v0, bki[bID]); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex temp_a1 = cuCmul(v1, bki[aID]); hipfftDoubleComplex temp_b1 = cuCmul(v1, bki[bID]); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex temp_a2 = cuCmul(v2, bki[aID]); hipfftDoubleComplex temp_b2 = cuCmul(v2, bki[bID]); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex temp_a3 = cuCmul(v3, bki[aID]); hipfftDoubleComplex temp_b3 = cuCmul(v3, bki[bID]); hipfftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x + temp_a2.x + temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y + temp_a2.y + temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; hipfftDoubleComplex tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x + temp_b2.x + temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y + temp_b2.y + temp_b3.y; // tmpa_gpuCoal[id + Ns2] = tmpa_gpuCoal1; register int largeSI = (id / Ns2) * (N + 1); register int tid = id % Ns2; d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal0; largeSI = (id / Ns2 + 1) * (N + 1); d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal1; //init with 0 // tmpa_gpuCoal[id].x = 0; // tmpa_gpuCoal[id].y = 0; // tmpa_gpuCoal[Ns2 + id].x = 0; // tmpa_gpuCoal[Ns2 + id].y = 0; //#pragma unroll // for (int i = 0; i < kpl; ++i) {//kpl // offset = i * Ns2; // aID = keySI + offset + id; // bID = keySI + offset + id + Ns2 * kpl; // // hipfftDoubleComplex temp_a = cuCmul(cuDecaFFTCoalesce[offset + id], bki[aID]); // tmpa_gpuCoal[id].x += temp_a.x; // tmpa_gpuCoal[id].y += temp_a.y; // // hipfftDoubleComplex temp_b = cuCmul(cuDecaFFTCoalesce[offset + id], bki[bID]); // tmpa_gpuCoal[Ns2 + id].x += temp_b.x; // tmpa_gpuCoal[Ns2 + id].y += temp_b.y; // // } // } // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // if (id < 1024) { // register int largeSI = (id / Ns2) * (N + 1); // register int tid = id % Ns2; // d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal[id]; //// d_in[largeSI + 2 * tid + 1].y = 1;//tmpa_gpuCoal[id]; // } } //__global__ void finishUpFFT_n_Bit(int *temp2, hipfftDoubleReal *d_out, int *temp3) { // int id = blockIdx.x*blockDim.x+threadIdx.x; // register int N = 1024, _2N = 2048; // register double _2p32 = double(INT64_C(1) << 32); // register double _1sN = double(1) / double(N); // register int bitIndex = id / N; // register int tIndex = id % N; // register int startIndexLarge = bitIndex * _2N; // temp2[id] = Torus32(int64_t(d_out[startIndexLarge + tIndex] * _1sN * _2p32)) + temp3[id];// // //} 
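// extractionAndKeySwitch_1_Bit: sample-extracts the LWE ciphertext from the blind-rotated
// accumulator (accum_a_b) and key-switches it back to the n = 500 LWE key in one kernel.
// The decomposition uses basebit = 2 (base = 4) with t = 8 digits, reading the key-switching
// key through ks_a_gpu_extendedPtr / ks_b_gpu_extendedPtr.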
__global__ void extractionAndKeySwitch_1_Bit(int *result_a, int *result_b, uint32_t *coal_d_aibar, uint32_t *coal_d_aij, int *accum_a_b, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { int id = blockIdx.x*blockDim.x+threadIdx.x; register int N = 1024, _2N = 2048, basebit = 2, base = 1 << basebit, mask = base - 1, t =8; register int32_t prec_offset = 1 << (32 - (1 + basebit * t)); register int index = 0; register int bitIndex = id / N; register int tIndex = id % N;//corresponding to j register int startIndex = bitIndex * N; __shared__ uint32_t s_coal_d_aibar[1024]; // __shared__ uint32_t coal_d_aij[1024 * 8]; bool multipleOfN = id % N == 0; s_coal_d_aibar[id] = (multipleOfN) * (accum_a_b[index - tIndex + startIndex] + prec_offset) + (!multipleOfN) * (-accum_a_b[index - tIndex + startIndex + N] + prec_offset); // if (id % N == 0) { // coal_d_aibar[id] = accum_a_b[index - tIndex + startIndex] + prec_offset; // } else { // coal_d_aibar[id] = -accum_a_b[index - tIndex + startIndex + N] + prec_offset; // } __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // if(id < 1024) {//t register int tempID = id; register int i = tempID / t; register int j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; // } // __syncthreads(); int subFromB = 0; int bi; if (id < 500) { result_a[id] = 0; register int A = 1024, B = t, C = base, D = 500, ks_t = 8; #pragma unroll 0 for (int i = 0; i < 1024; ++i) { int sI = i * ks_t; #pragma unroll 0 for (int j = 0; j < 8; ++j) {//ks_t int sI2 = sI + j; int aij = coal_d_aij[sI2]; if (aij != 0) { result_a[id] -= ks_a_gpu_extendedPtr[i * B * C * D + j * C * D + aij * D + (id % D)];//sourceA[(i * B * C * D + j * C * D+ aij * params_n + id)];//source[aij][id]; } // if(id < 1) { bi = coal_d_aij[sI2 + id]; subFromB += ks_b_gpu_extendedPtr[i * B * C + j * C + bi]; // } } } } if (id < 1) { result_b[0] = accum_a_b[N] - subFromB; } } /* void bootstrapping_gull_gpu_1_bit_wise(LweSample_16 *result, int *temp_res_a, int *temp_res_b, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { //bootstrapping woks uptoFFT int nThreads = 1024, BLOCKSIZE = 1024, k = 1, N = 1024, kpl = 4, Ns2 = 512, _2N = 2048; static const Torus32 MU = modSwitchToTorus32(1, 8); int gridSize = (int) ceil((float) (nThreads) / BLOCKSIZE);//1 int *accum_a_b, *bara, *temp_accum_a_b;//accum a and accum b together; bara; tempaccum for mux rotate hipMalloc(&accum_a_b, nBits * 1024 
* (k + 1) * sizeof(int)); hipMalloc(&temp_accum_a_b, nBits * 1024 * (k + 1) * sizeof(int)); hipMalloc(&bara, nBits * 1024 * sizeof(int)); hipDeviceProp_t cProfile; hipGetDeviceProperties(&cProfile, 0); int nSM = cProfile.multiProcessorCount; cout << "#SM: " << nSM << endl; //20 hipStream_t streams[nSM]; #pragma unroll for (int i = 0; i < 20; ++i) {//nSM hipStreamCreateWithFlags(&streams[i], hipStreamNonBlocking); } for (int bIndex = 0; bIndex < nBits; ++bIndex) { int accumStart = bIndex * (1024 * (k + 1)); int baraStart = bIndex * 1024; int temp_res_aStart = bIndex * 500; bootstrappingUptoBlindRotate_OneBit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (accum_a_b + accumStart, temp_accum_a_b + accumStart, bara + baraStart, MU, temp_res_a + temp_res_aStart, temp_res_b[bIndex], NULL, cudaBkFFTCoalesceExt); } hipDeviceSynchronize(); //after blind rotate int *decaCoalesce; hipMalloc(&decaCoalesce, nBits * N * kpl * sizeof(int));//1024*4 hipfftDoubleComplex *cuDecaFFTCoalesce; hipMalloc(&cuDecaFFTCoalesce, nBits * kpl * Ns2 * sizeof(hipfftDoubleComplex));//512*4 hipfftDoubleComplex *tmpa_gpuCoal; hipMalloc(&tmpa_gpuCoal, nBits * Ns2 * sizeof(hipfftDoubleComplex) * (k + 1)); //fft variables hipfftDoubleReal* d_rev_in; hipfftDoubleComplex *d_rev_out; hipfftDoubleComplex *d_in; hipfftDoubleReal *d_out; int batch = kpl; int dParts = 4; //fft plans hipfftHandle p; hipfftHandle rev_p; //fft variables allocation hipMalloc(&d_rev_in, nBits * sizeof(hipfftDoubleReal) * _2N * batch); hipMalloc(&d_rev_out, nBits * sizeof(hipfftDoubleComplex) * (N + 1) * batch); hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, nBits * batch);//(nBits * batch)/dParts);// (batch - (batch/dParts))); batch = 2;//batch/dParts;//a and b together hipMalloc(&d_in, nBits * sizeof(hipfftDoubleComplex) * (N + 1) * batch);//batch hipMalloc(&d_out, nBits * sizeof(hipfftDoubleReal) * _2N * batch); hipfftPlan1d(&p, _2N, HIPFFT_Z2D, nBits * batch); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; // assert(nBits == 1); //call tfhe_MuxRotate_FFT_16 #pragma unroll for (int j = 0; j < 500; ++j) {//500 gridSize = 2;//2;//as accum is of (k + 1) for (int bIndex = 0; bIndex < nBits; ++bIndex) { //find starting indices int accumStart = bIndex * 1024 * (k + 1); int decaCoalesceStart = bIndex * 1024 * kpl; int d_rev_inStart = bIndex * _2N * kpl; int baraStart = bIndex * N; prepareForiFFT_1_Bit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (temp2 + accumStart, decaCoalesce + decaCoalesceStart, d_rev_in + d_rev_inStart, bara + baraStart, j, temp3 + accumStart); } hipDeviceSynchronize(); hipfftExecD2Z(rev_p, d_rev_in, d_rev_out); hipDeviceSynchronize(); int length = kpl * Ns2;//4 * 512 = 2048 gridSize = 1;//(int) ceil((float) (length) / BLOCKSIZE); //2 for (int bIndex = 0; bIndex < nBits; ++bIndex) { int cuDecaFFTCoalesceStart = bIndex * kpl * Ns2; int tmpa_gpuCoalStart = bIndex * (k + 1) * Ns2; int d_inStart = bIndex * (N + 1) * (k + 1); int d_rev_outStart = bIndex *(N + 1) * kpl; prepareForFFT_1_Bit<<<gridSize, 512, 0, streams[bIndex % nSM]>>> (cuDecaFFTCoalesce + cuDecaFFTCoalesceStart, tmpa_gpuCoal + tmpa_gpuCoalStart, d_in + d_inStart, d_rev_out + d_rev_outStart, cudaBkFFTCoalesceExt, j, N, Ns2, length); } hipDeviceSynchronize(); hipfftExecZ2D(p, d_in, d_out); hipDeviceSynchronize(); //after fft length = N * 2; gridSize = (int) ceil((float) (length) / BLOCKSIZE); //2 for (int bIndex = 0; bIndex < nBits; ++bIndex) { int accumStart = bIndex * 1024 * (k + 1); int d_outStart = bIndex * _2N * (k + 1); finishUpFFT_n_Bit<<<gridSize, BLOCKSIZE, 0, 
streams[bIndex % nSM]>>> (temp2 + accumStart, d_out + d_outStart, temp3 + accumStart); } hipDeviceSynchronize(); swap(temp2, temp3); } //output is in temp3 //extract and ks //intermediate variables to test u (delete afterwards) int *result_b; double *result_cv = NULL; hipMalloc(&result_b, nBits * sizeof(int)); // hipMalloc(&result_cv, 1 * sizeof(double)); uint32_t *coal_d_aibar; hipMalloc(&coal_d_aibar, nBits * N * sizeof(uint32_t)); int coal_d_aijSize = nBits * N * 8;//t uint32_t *coal_d_aij; hipMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t)); // int length = N * 8;//t gridSize = 1;//(int) ceil((float) (length) / BLOCKSIZE); for (int bIndex = 0; bIndex < nBits; ++bIndex) { int result_aStart = bIndex * 500; int result_bStart = bIndex; int coal_d_aibarStart = bIndex * N; int coal_d_aijStart = bIndex * N * 8; int accumStart = bIndex * (k + 1) * 1024; extractionAndKeySwitch_1_Bit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (result->a + result_aStart, result_b + result_bStart, coal_d_aibar + coal_d_aibarStart, coal_d_aij + coal_d_aijStart, temp3 + accumStart, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); } hipDeviceSynchronize(); hipMemcpy(result->b, result_b, nBits * sizeof(int), hipMemcpyDeviceToHost); // int *temp = new int[500]; // hipMemcpy(temp, result->a, 500 * sizeof(int), hipMemcpyDeviceToHost); // for (int i = 0; i < 500; ++i) { // cout << temp[i] << " "; // } // cout << endl; // cout << result->b[0] << endl; // assert(nBits == 1); #pragma unroll for (int i = 0; i < 20; ++i) { //nSM hipStreamDestroy(streams[i]); } hipFree(temp_res_a); hipFree(accum_a_b); hipFree(temp_accum_a_b); hipFree(bara); hipFree(decaCoalesce);//1024*4 hipFree(cuDecaFFTCoalesce);//512*4 hipFree(tmpa_gpuCoal); hipFree(d_rev_in); hipFree(d_rev_out); hipFree(d_in);//batch hipFree(d_out); hipFree(result_b); hipFree(coal_d_aibar); hipFree(coal_d_aij); hipfftDestroy(rev_p); hipfftDestroy(p); } EXPORT void bootsAND_fullGPU_OneBit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { const int n = 500, BLOCKSIZE = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); int *temp_res_a, *temp_res_b; hipMalloc(&temp_res_a, n * nBits * sizeof(Torus32)); temp_res_b = new int[nBits]; int gridSize = (int) ceil((float) (n * nBits) / BLOCKSIZE); hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, temp_res_a, ca->a, cb->a, n * nBits); for (int i = 0; i < nBits; ++i) { temp_res_b[i] = ca->b[i] + cb->b[i] + AndConst; } bootstrapping_gull_gpu_1_bit_wise(result, temp_res_a, temp_res_b, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); hipFree(temp_res_a); delete [] temp_res_b; } */ __constant__ int n = 500, N = 1024, _2N = 2048, Ns2 = 512, Nx2 = 2048, k = 1; __constant__ uint32_t halfBg = 512, maskMod = 1023, Bgbit = 10, kpl = 4, l = 2; __constant__ uint32_t offset = 2149580800; __constant__ double _1sN = double(1) / double(1024); __constant__ double _2p32 = double(INT64_C(1) << 32); __global__ void bootstrappingUptoBlindRotate_n_Bit(int *accum_a_b, int *bara, Torus32 MU, int nBits, int *temp_res_a, int *barb) { register int id = blockIdx.x * blockDim.x + threadIdx.x; int bIndex 
= id / N; int baraIndex = id % N; int a = _2N - barb[bIndex]; int aa = a - N; register bool L1 = a < N, L2 = baraIndex < a, L3 = baraIndex < aa; register int acc_a_b_id = L1 * (L2 * (-MU) + (!L2) * (MU)) + (!L1) * (L3 * (MU) + (!L3) * (-MU)); accum_a_b[id] = acc_a_b_id; if(id < n * nBits) { bIndex = id / n; register int temp_res_a_id = temp_res_a[id]; register int bara_id = modSwitchFromTorus32_GPU_device(temp_res_a_id, Nx2); bara[bIndex * N + id % n] = bara_id; } } __global__ void prepareForiFFT_n_Bit(int *des, int *decaCoalesce, hipfftDoubleReal *d_rev_in, int nBits, int *bara, int baraIndex, int *source, int length) { register int id = blockIdx.x * blockDim.x + threadIdx.x; // if (id < length) { // bool outerBlock = id < nBits * 2 * 1024; // if (id < nBits * 2 * 1024) {//nBits * (k + 1) * 1024 register int bitIndex = (id / N) % nBits; register int threadIdModN = id % N; register int a = bara[bitIndex * N + baraIndex]; register int aa = a - N; register bool L1 = a < N, L2 = threadIdModN < a, L3 = threadIdModN < aa; // des[id] = (!outerBlock) * des[id] // + outerBlock * (l1 * (l2 * (-source[id - a + N] - source[id]) // + (!l2) * (source[id - a] - source[id])) // + (!l1) * (l3 * (source[id - aa + N] - source[id]) // + (!l3) * (-source[id - aa] - source[id]))); register int des_id = 0; register int s1_id = L1 * ((L2) * (id - a + N) + (!L2) * (id - a)) + (!L1) * ((L3) * (id - aa + N) + (!L3) * (id - aa)); // int des_id = (L1 * (L2 * (-source[id - a + N] - source[id]) // + (!L2) * (source[id - a] - source[id])) // + (!L1) * (L3 * (source[id - aa + N] - source[id]) // + (!L3) * (-source[id - aa] - source[id]))); des_id = (L1 * (L2 * (-source[s1_id] - source[id]) + (!L2) * (source[s1_id] - source[id])) + (!L1) * (L3 * (source[s1_id] - source[id]) + (!L3) * (-source[s1_id] - source[id]))); // if (a < N) { // if (threadIdModN < a) { //// des[id] = -source[id - a + N] - source[id]; // des_id = -source[id - a + N] - source[id]; // } else { //// des[id] = source[id - a] - source[id]; // des_id = source[id - a] - source[id]; // } // } else { // if (threadIdModN < aa) { //// des[id] = source[id - aa + N] - source[id]; // des_id = source[id - aa + N] - source[id]; // } else { //// des[id] = -source[id - aa] - source[id]; // des_id = -source[id - aa] - source[id]; // } // } // bool middleBlock = id < nBits * 2 * 1024;//4//kpl // decaCoalesce[id] = middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t val = ((uint32_t)(des_id + offset)); register uint32_t temp1 = (val >> decal) & maskMod; register int xxxxx1 = (temp1 - halfBg);// + (!middleBlock) * (decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id]); // decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id] = xxxxx1; // middleBlock * (temp1 - halfBg) + // (!middleBlock) * (decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id]); p = 1; decal = (32 - (p + 1) * Bgbit); val = ((uint32_t)(des_id + offset)); temp1 = (val >> decal) & maskMod; register int xxxxx2 = temp1 - halfBg; // decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * nBits)] = xxxxx2; // middleBlock * (temp1 - halfBg) + // (!middleBlock) * decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * // nBits)];//(temp1 - halfBg) + (!middleBlock) * decaCoalesce[id];//middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * 
decaCoalesce[id]; //(!middleBlock) * decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * nBits)]; // decaCoalesce[(nBits * N) + id] = middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; // decaCoalesce[(nBits * N) + id] = middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; // } // register int startIndexSmall = bIndex * N; // middleBlock = tIndex < N; // d_rev_in[id] = middleBlock * (decaCoalesce[startIndexSmall + tIndex] / 2.) // + (!middleBlock) * (d_rev_in[id] = -decaCoalesce[startIndexSmall + tIndex - N] / 2.); // d_rev_in[((id / (N * nBits)) * (N * nBits)) + id + (tIndex >= N) * 1024 * bIndex] = id;//middleBlock * (1) + (!middleBlock) * (d_rev_in[id]); int bIndex = (id / N); int tIndex = id % N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= nBits) * nBits * N * 2; d_rev_in[destTod_rev_in] = xxxxx1 / 2.;//id;// d_rev_in[destTod_rev_in + 1024] = -xxxxx1 / 2.;//id;// destTod_rev_in += nBits * 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2 / 2.;//id; d_rev_in[destTod_rev_in + 1024] = -xxxxx2 / 2.;//id; // } } __global__ void prepareForFFT_n_Bit(hipfftDoubleComplex *cuDecaFFTCoalesce, hipfftDoubleComplex *tmpa_gpuCoal, hipfftDoubleComplex *d_in, hipfftDoubleComplex *d_rev_out, hipfftDoubleComplex *bki, int keyIndex, int nBits) { int id = blockIdx.x * blockDim.x + threadIdx.x; // if (id < nBits * 4 * 512) {//nBits * kpl * Ns2 int tempId = id; int bitIndex = tempId/Ns2; hipfftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; hipfftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; hipfftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; hipfftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; int keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex bki_aid = bki[aID]; hipfftDoubleComplex bki_bid = bki[bID]; hipfftDoubleComplex temp_a0 = cuCmul(v0, bki_aid); hipfftDoubleComplex temp_b0 = cuCmul(v0, bki_bid); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex temp_a1 = cuCmul(v1, bki_aid); hipfftDoubleComplex temp_b1 = cuCmul(v1, bki_bid); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex temp_a2 = cuCmul(v2, bki_aid); hipfftDoubleComplex temp_b2 = cuCmul(v2, bki_bid); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex temp_a3 = cuCmul(v3, bki_aid); hipfftDoubleComplex temp_b3 = cuCmul(v3, bki_bid); hipfftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x +temp_a2.x +temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y +temp_a2.y +temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; hipfftDoubleComplex 
tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x +temp_b2.x +temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y +temp_b2.y +temp_b3.y; // tmpa_gpuCoal[nBits * Ns2 + id] = tmpa_gpuCoal1; // hipfftDoubleComplex temp_a = cuCmul(cuDecaFFTCoalesce[i * (Ns2 * nBits) + id], bki[aID]); // hipfftDoubleComplex temp_b = cuCmul(cuDecaFFTCoalesce[i * (Ns2 * nBits) + id], bki[bID]); int largeSI = (id / Ns2) * (N + 1); int tid = id % Ns2; d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal0; largeSI = (id / Ns2 + nBits) * (N + 1); d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal1; } __global__ void finishUpFFT_n_Bit(int *temp2, hipfftDoubleReal *d_out, int *temp3, int nBits) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int bitIndex = id / N; register int tIndex = id % N; register int startIndexLarge = bitIndex * _2N; int temp3_id = temp3[id]; register hipfftDoubleReal d_out_id = d_out[startIndexLarge + tIndex]; temp2[id] = Torus32(int64_t(d_out_id * _1sN * _2p32)) + temp3_id; } __global__ void extract_gpu_n_Bit(int *destination, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int bitIndex = id / N; register int tIndex = id % N;//corresponding to j register int startIndex = bitIndex * N; register bool L1 = id % N == 0; register int s_id = L1 * (-tIndex + startIndex) + (!L1) * (-tIndex + startIndex + N); register int des_id = source[s_id]; des_id = L1 * des_id + (!L1) * (-des_id);// + 32768; destination[id] = des_id; } __global__ void getAibarCoalesce_n_Bit(uint32_t *d_aibar, const Torus32 *ai, int32_t prec_offset, int bitSize, int n) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int i = id/bitSize; register int tID = id % bitSize; register int startIndex = tID * n; Torus32 ai_i = ai[startIndex + i]; d_aibar[id] = ai_i + prec_offset; } __global__ void calculateAijFromAibarCoalesce_n_Bit(uint32_t *aij, uint32_t *aibar, int bitSize, int t, int basebit, int mask) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int i = id /(bitSize * t); register int j = (id / bitSize) % t; register int tId = id % bitSize; register uint32_t aibar_id = aibar[i * bitSize + tId]; aij[id] = (aibar_id >> (32 - (j + 1) * basebit)) & mask; } __global__ void lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit(int *destinationA, Torus32 *sourceA, uint32_t *d_aij, int *destinationB, int *sourceB, int ks_n, int ks_t, int ks_base, int bitSize, int n, int params_n) { int id = blockIdx.x * blockDim.x + threadIdx.x; int A = ks_n, B = ks_t, C = ks_base, D = params_n; register int sourceA_id; register int desA_id = destinationA[id]; register int desB_id = 0; desB_id = destinationB[id % bitSize];// the modu is to avoid if and aray index out of bound register int sourceB_id; int bitIndex = id / params_n; int tId = id % (bitSize * params_n); #pragma unroll for (register int i = 0; i < 1024; ++i) { int sI = i * (ks_t * bitSize); for (register int j = 0; j < ks_t; ++j) { int sI2 = sI + j * bitSize; int aij = d_aij[sI2 + bitIndex]; // if (aij != 0) { sourceA_id = sourceA[i * B * C * D + j * C * D + aij * D + (id % D)]; desA_id -= sourceA_id; // } // bool id_lt_bitSize = id < bitSize; // int bi = id_lt_bitSize * d_aij[sI2 + id] + (!id_lt_bitSize) * 0; // if(id < bitSize) { int bi = d_aij[sI2 + (id % bitSize)];//this mod is to avoid the out of bound and to avoid if else desB_id -= sourceB[i * B * C + j * C + bi]; // sourceB_id = sourceB[i * B * C + j * C + bi]; // desB_id -= (id_lt_bitSize * (sourceB_id) + (!id_lt_bitSize) * 0); // } } } destinationA[id] = 
desA_id; if (id < bitSize) { destinationB[id] = desB_id; } } void keySwitch_n_Bit(LweSample_16* result, int *u_a_GPU, int *u_b_GPU, int nBits, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { //key switch const static int n = 500, ks_n = 1024, ks_basebit = 2, ks_t = 8, ks_out_params_n = 500, nTHREADS = 1024; const static int base = 1 << ks_basebit;// base=2 in [CGGI16] const static int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const static int mask = base - 1; // cout << "nBits: " << nBits << endl; int coal_d_aibarSize = nBits * ks_n;//16*1024 uint32_t *coal_d_aibar; CudaSafeCall(hipMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t))); cudaCheckErrors("ks: 0"); hipLaunchKernelGGL(( getAibarCoalesce_n_Bit), dim3(nBits), dim3(nTHREADS), 0, 0, coal_d_aibar, u_a_GPU, prec_offset, nBits, ks_n); int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; CudaSafeCall(hipMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t))); cudaCheckErrors("ks: 1"); hipLaunchKernelGGL(( calculateAijFromAibarCoalesce_n_Bit), dim3(8 * nBits), dim3(nTHREADS), 0, 0, coal_d_aij, coal_d_aibar, nBits, ks_t, ks_basebit, mask); cudaCheckErrors("ks: 2"); int nBLOCKS = (int) ceil((float) (nBits * n) / nTHREADS);//500 hipLaunchKernelGGL(( lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit), dim3(nBits), dim3(n), 0, 0, result->a, ks_a_gpu_extendedPtr, coal_d_aij, u_b_GPU, ks_b_gpu_extendedPtr, ks_n,//1024 ks_t,//8 base, nBits, ks_n,//1024 n);//500/**/ cudaCheckErrors("ks: 3"); // hipDeviceSynchronize(); CudaSafeCall(hipMemcpy(result->b, u_b_GPU, nBits * sizeof(int), D2H)); cudaCheckErrors("ks: 4"); hipFree(coal_d_aibar); hipFree(coal_d_aij); } void bootstrapAndKeySwitch_n_Bit(LweSample_16* result, int *temp_res_a_gpu, int *temp_res_b_cpu, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } int *accum_a_b, *bara, *temp_accum_a_b, *barb;//, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate hipMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int)); hipMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int)); hipMalloc(&bara, nBits * N * sizeof(int)); hipMalloc(&barb, nBits * sizeof(int)); hipMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); hipMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); hipMemset(bara, 0, nBits * N * sizeof(int)); hipMemcpy(barb, temp_res_b_cpu, nBits * sizeof(int), H2D); cudaCheckErrors("Here0"); hipLaunchKernelGGL(( bootstrappingUptoBlindRotate_n_Bit), dim3(nBits), dim3(nTHREADS), 0, 0, accum_a_b + nBits * N, bara, MU, nBits, temp_res_a_gpu, barb); cudaCheckErrors("Here1"); //cufft helper variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 hipfftDoubleReal* d_rev_in; hipfftDoubleComplex *d_rev_out; hipfftDoubleComplex *d_in; hipfftDoubleReal *d_out; //cufft plans hipfftHandle p; hipfftHandle rev_p; //ifft variables allocation CudaSafeCall(hipMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(hipfftDoubleReal))); CudaSafeCall(hipMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, iFFTBatch);// - nBits);// - (iFFTBatch / dParts)); // CudaSafeCall(hipMemset(d_rev_in, 
0, iFFTBatch * _2N * sizeof(hipfftDoubleReal))); //fft variables allocation CudaSafeCall(hipMalloc(&d_in, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); CudaSafeCall(hipMalloc(&d_out, FFTBatch * _2N * sizeof(hipfftDoubleReal))); hipfftPlan1d(&p, _2N, HIPFFT_Z2D, FFTBatch); CudaSafeCall(hipMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; cudaCheckErrors("Here2"); for (int j = 0; j < 500; ++j) {//500 cudaCheckErrors("HereInside1"); hipLaunchKernelGGL(( prepareForiFFT_n_Bit), dim3(nBits * 2), dim3(nTHREADS), 0, 0, temp2, NULL,//decaCoalesce, d_rev_in, nBits, bara, j, temp3, nBits * 2 * nTHREADS); cudaCheckErrors("HereInside2"); hipfftExecD2Z(rev_p, d_rev_in, d_rev_out); // hipDeviceSynchronize(); cudaCheckErrors("HereInside3"); hipLaunchKernelGGL(( prepareForFFT_n_Bit), dim3(nBits), dim3(512), 0, 0, NULL,//cuDecaFFTCoalesce, NULL,//tmpa_gpuCoal, d_in, d_rev_out, cudaBkFFTCoalesceExt, j, nBits); cudaCheckErrors("HereInside4"); hipfftExecZ2D(p, d_in, d_out); // hipDeviceSynchronize(); cudaCheckErrors("HereInside5"); hipLaunchKernelGGL(( finishUpFFT_n_Bit), dim3(nBits * 2), dim3(nTHREADS), 0, 0, temp2, d_out, temp3, nBits); cudaCheckErrors("HereInside6"); swap(temp2, temp3); // int *x = temp2; // temp2 = temp3; // temp3 = x; cudaCheckErrors("HereInside7"); } //extract int *u_a_GPU, *u_b_CPU, *temp_u_b; hipMalloc(&u_a_GPU, nBits * N * sizeof(int)); u_b_CPU = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; hipMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H); hipLaunchKernelGGL(( extract_gpu_n_Bit), dim3(nBits), dim3(1024), 0, 0, u_a_GPU, accum_a_b); for (int i = 0; i < nBits; ++i) { u_b_CPU[i] = temp_u_b[i * N + nBits * N]; } int *u_b_GPU; CudaSafeCall(hipMalloc(&u_b_GPU, nBits * sizeof(int))); cudaCheckErrors("Before Extracting"); CudaSafeCall(hipMemset(result->a, 0, nBits * 500 * sizeof(int))); hipMemcpy(u_b_GPU, u_b_CPU, nBits * sizeof(int), H2D); //key switch cudaCheckErrors("Before starting KS"); keySwitch_n_Bit(result, u_a_GPU, u_b_GPU, nBits, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr);/**/ hipFree(accum_a_b); hipFree(temp_accum_a_b); hipFree(bara); hipFree(barb); //cufft helper variables hipFree(d_rev_in); hipFree(d_rev_out); hipFree(d_in); hipFree(d_out); hipfftDestroy(rev_p); hipfftDestroy(p); //KS vars hipFree(u_a_GPU); delete [] u_b_CPU; delete [] temp_u_b; hipFree(u_b_GPU); } void bootstrapAndKeySwitch_n_Bit_MUX(LweSample_16* result, Torus32 *temp_res_a_gpu, int *temp_res_b_cpu, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } int *accum_a_b, *bara, *temp_accum_a_b, *barb;//, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate CudaSafeCall(hipMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(hipMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(hipMalloc(&bara, nBits * N * sizeof(int))); CudaSafeCall(hipMalloc(&barb, nBits * sizeof(int))); CudaSafeCall(hipMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(hipMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(hipMemset(bara, 0, nBits * N * sizeof(int))); 
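// temp_res_b_cpu already holds the b parts switched to the [0, 2N) phase space (loop above);
// copy them into barb on the device so bootstrappingUptoBlindRotate_n_Bit can build the
// per-bit test vector.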
CudaSafeCall(hipMemcpy(barb, temp_res_b_cpu, nBits * sizeof(int), H2D)); cudaCheckErrors("Here0"); hipLaunchKernelGGL(( bootstrappingUptoBlindRotate_n_Bit), dim3(nBits), dim3(nTHREADS), 0, 0, accum_a_b + nBits * N, bara, MU, nBits, temp_res_a_gpu, barb); cudaCheckErrors("Here1"); //cufft helper variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 hipfftDoubleReal* d_rev_in; hipfftDoubleComplex *d_rev_out; hipfftDoubleComplex *d_in; hipfftDoubleReal *d_out; //cufft plans hipfftHandle p; hipfftHandle rev_p; //ifft variables allocation CudaSafeCall(hipMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(hipfftDoubleReal))); CudaSafeCall(hipMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, iFFTBatch);// - nBits);// - (iFFTBatch / dParts)); CudaSafeCall(hipMemset(d_rev_in, 0, iFFTBatch * _2N * sizeof(hipfftDoubleReal))); //fft variables allocation CudaSafeCall(hipMalloc(&d_in, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); CudaSafeCall(hipMalloc(&d_out, FFTBatch * _2N * sizeof(hipfftDoubleReal))); hipfftPlan1d(&p, _2N, HIPFFT_Z2D, FFTBatch); CudaSafeCall(hipMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex))); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; // cout << nBits << endl; cudaCheckErrors("Here2"); for (int j = 0; j < 500; ++j) {//500 cudaCheckErrors("HereInside1"); hipLaunchKernelGGL(( prepareForiFFT_n_Bit), dim3(nBits * 2), dim3(nTHREADS), 0, 0, temp2, NULL,//decaCoalesce, d_rev_in, nBits, bara, j, temp3, nBits * 2 * nTHREADS); cudaCheckErrors("HereInside2"); hipfftExecD2Z(rev_p, d_rev_in, d_rev_out); // hipDeviceSynchronize(); cudaCheckErrors("HereInside3"); hipLaunchKernelGGL(( prepareForFFT_n_Bit), dim3(nBits), dim3(512), 0, 0, NULL,//cuDecaFFTCoalesce, NULL,//tmpa_gpuCoal, d_in, d_rev_out, cudaBkFFTCoalesceExt, j, nBits); cudaCheckErrors("HereInside4"); hipfftExecZ2D(p, d_in, d_out); // hipDeviceSynchronize(); cudaCheckErrors("HereInside5"); hipLaunchKernelGGL(( finishUpFFT_n_Bit), dim3(nBits * 2), dim3(nTHREADS), 0, 0, temp2, d_out, temp3, nBits); cudaCheckErrors("HereInside6"); // swap(temp2, temp3); int* x = temp2; temp2 = temp3; temp3 = x; cudaCheckErrors("HereInside7"); } //extract int *u_a_GPU, *u_b_CPU, *temp_u_b; CudaSafeCall(hipMalloc(&u_a_GPU, nBits * N * sizeof(int))); u_b_CPU = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; CudaSafeCall(hipMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H)); hipLaunchKernelGGL(( extract_gpu_n_Bit), dim3(nBits), dim3(1024), 0, 0, u_a_GPU, accum_a_b); for (int i = 0; i < nBits; ++i) { u_b_CPU[i] = temp_u_b[i * N + nBits * N]; } nBits = nBits/2; static const Torus32 MuxConst = modSwitchToTorus32(1, 8); int *u_a_GPU_halfBits, *u_b_CPU_halfBits, *u_b_GPU_halfBits; CudaSafeCall(hipMalloc(&u_a_GPU_halfBits, nBits * N * sizeof(int))); CudaSafeCall(hipMalloc(&u_b_GPU_halfBits, nBits * sizeof(int))); u_b_CPU_halfBits = new int[nBits]; hipLaunchKernelGGL(( ANDvec_vector), dim3(nBits), dim3(nTHREADS), 0, 0, u_a_GPU_halfBits, u_a_GPU, u_a_GPU + nBits * N, 1, nBits, N, nBits * N);//the three params are redundant and not used for (int i = 0; i < nBits; ++i) { u_b_CPU_halfBits[i] = u_b_CPU[i] + u_b_CPU[i + nBits] + MuxConst; } // hipMemset(result->a, 0, nBits * 500 * sizeof(Torus32));//TAKEN TO THE CALLER CudaSafeCall(hipMemcpy(u_b_GPU_halfBits, u_b_CPU_halfBits, nBits * sizeof(int), H2D)); //key switch const static int ks_n = 1024, ks_basebit = 2, ks_t = 8, ks_out_params_n = 500; const static int base = 1 << ks_basebit;// 
base=2 in [CGGI16] const static int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const static int mask = base - 1; // cout << "nBits: " << nBits << endl; int coal_d_aibarSize = nBits * ks_n;//16*1024 uint32_t *coal_d_aibar; CudaSafeCall(hipMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t))); cudaCheckErrors("ks: 0"); hipLaunchKernelGGL(( getAibarCoalesce_n_Bit), dim3(nBits), dim3(nTHREADS), 0, 0, coal_d_aibar, u_a_GPU_halfBits, prec_offset, nBits, ks_n); int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; CudaSafeCall(hipMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t))); cudaCheckErrors("ks: 1"); hipLaunchKernelGGL(( calculateAijFromAibarCoalesce_n_Bit), dim3(8 * nBits), dim3(nTHREADS), 0, 0, coal_d_aij, coal_d_aibar, nBits, ks_t, ks_basebit, mask); cudaCheckErrors("ks: 2"); hipLaunchKernelGGL(( lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit), dim3(nBits), dim3(n), 0, 0, result->a, ks_a_gpu_extendedPtr, coal_d_aij, u_b_GPU_halfBits, ks_b_gpu_extendedPtr, ks_n,//1024 ks_t,//8 base, nBits, ks_n,//1024 n);//500 cudaCheckErrors("ks: 3"); // hipDeviceSynchronize(); CudaSafeCall(hipMemcpy(result->b, u_b_GPU_halfBits, nBits * sizeof(int), D2H)); cudaCheckErrors("ks: 4"); cudaCheckErrors("BootsMUX: n"); hipFree(accum_a_b); hipFree(temp_accum_a_b); hipFree(bara); hipFree(barb); //cufft helper variables hipFree(d_rev_in); hipFree(d_rev_out); hipFree(d_in); hipFree(d_out); hipfftDestroy(rev_p); hipfftDestroy(p); //KS vars hipFree(u_a_GPU); delete [] u_b_CPU; delete [] temp_u_b; hipFree(u_a_GPU_halfBits); delete [] u_b_CPU_halfBits; hipFree(u_b_GPU_halfBits); //ks hipFree(coal_d_aibar); hipFree(coal_d_aij); } EXPORT void bootsAND_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; register int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("AND: Here-2"); hipLaunchKernelGGL(( vecAdd), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, ca->a, cb->a, length); cudaCheckErrors("AND: Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; hipFree(temp_res_a_gpu); } EXPORT void bootsXOR_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XOR: Here-2"); int mulVal = 2; hipLaunchKernelGGL(( vecAddMulTo), 
dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, mulVal, ca->a, cb->a, length); cudaCheckErrors("XOR: Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = (ca->b[i] + cb->b[i]) * mulVal + XorConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; hipFree(temp_res_a_gpu); } EXPORT void bootsXNOR_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/4) + 2*(-ca-cb) static const Torus32 XnorConst = modSwitchToTorus32(-1, 4); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XNOR: Here-2"); int mulVal = 2; hipLaunchKernelGGL(( vecAddMulTo), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, mulVal, ca->a, cb->a, length); cudaCheckErrors("XNOR: Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = (ca->b[i] + cb->b[i]) * mulVal + XnorConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); result->b[i] = -MU; } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; hipFree(temp_res_a_gpu); } EXPORT void bootsMUX_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const LweSample_16 *cc, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); static const Torus32 AndConst = modSwitchToTorus32(-1, 8); static const Torus32 MuxConst = modSwitchToTorus32(1, 8); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; int nOutputs = 2; int nBootsBits = nBits * nOutputs; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nBootsBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBootsBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XMUX: Here-2"); hipLaunchKernelGGL(( ANDvec_vector), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, ca->a, cb->a, 1, nBits, n, length);// dummy variables (last4) hipLaunchKernelGGL(( SUBvec_vector), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu + length, ca->a, cc->a, 1, nBits, n, length);// dummy variables (last4) for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; temp_res_b_cpu[i + nBits] = - ca->b[i] + cc->b[i] + AndConst; result->b[i] = -MU; } hipMemset(result->a, 0, nBits * 500 * sizeof(Torus32)); bootstrapAndKeySwitch_n_Bit_MUX(result, temp_res_a_gpu, temp_res_b_cpu, nBootsBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); hipFree(temp_res_a_gpu); delete [] temp_res_b_cpu; } EXPORT void bootsANDXOR_fullGPU_n_Bit_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const int vLength, const int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024, nOut = 2; int nTotalInputBits = 
vLength * nBits; int nTotalOutputBits = vLength * nBits * nOut; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; Torus32 *temp_res_a_gpu; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nTotalOutputBits * sizeof(Torus32))); int *temp_res_b_cpu = new int[nTotalOutputBits]; register int length = nTotalOutputBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("ANDXOR_vec: Here - 0"); hipLaunchKernelGGL(( ANDXORvecMulAllto_vector), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, ca->a, cb->a, vLength, nBits, n, length); cudaCheckErrors("AND: Here - 1"); for (int i = 0; i < nTotalInputBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_res_b_cpu[i + nTotalInputBits] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nTotalOutputBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); cudaCheckErrors("AND: Here - 2"); delete [] temp_res_b_cpu; hipFree(temp_res_a_gpu); } EXPORT void bootsXORXOR_fullGPU_n_Bit_vector(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 *cb2, const int vLength, const int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024, nOut = 2; int nTotalInputBits = vLength * nBits; int nTotalOutputBits = vLength * nBits * nOut; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; Torus32 *temp_res_a_gpu; CudaSafeCall(hipMalloc(&temp_res_a_gpu, n * nTotalOutputBits * sizeof(Torus32))); int *temp_res_b_cpu = new int[nTotalOutputBits]; register int length = nTotalInputBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("ANDXOR_vec: Here - 0"); hipLaunchKernelGGL(( XORXORvecMulAllto_vector), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu, ca1->a, ca2->a, n, nBits, length); hipLaunchKernelGGL(( XORXORvecMulAllto_vector), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a_gpu + length, cb1->a, cb2->a, n, nBits, length); cudaCheckErrors("AND: Here - 1"); for (int i = 0; i < nTotalInputBits; ++i) { temp_res_b_cpu[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_res_b_cpu[i + nTotalInputBits] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nTotalOutputBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); cudaCheckErrors("AND: Here - 2"); delete [] temp_res_b_cpu; hipFree(temp_res_a_gpu); } __global__ void bootstrappingUptoBlindRotate_1_Bit_stream(int *accum_a_b, int *temp_accum_a_b, int *bara, int *testvectbis, Torus32 MU, int nBits, int *temp_res_a, int barb) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register bool L1 = (_2N - barb) < N;//a register bool L2 = id < (_2N - barb);//a; register bool L3 = id >= ((_2N - barb) - N);//a//aa; register int testvectbis_local = L1 * (L2 * (-1) + (!L2)) * MU + (!L1) * ((L3 * (-1) + (!L3)) * MU); accum_a_b[id] = testvectbis_local;//previously it was id + 1024 register bool id_lt_500 = id < 500; register int temp_res_a_id = temp_res_a[id]; 
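// Zero the mask coefficients beyond n = 500 (threads 500..1023 are padding), then mod-switch
// them from the torus to Z_{2N}; bara[] holds the per-coefficient rotation amounts consumed
// by the blind rotation loop.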
temp_res_a_id = temp_res_a_id * id_lt_500; register int bara_id = modSwitchFromTorus32_GPU_device(temp_res_a_id, Nx2); bara[id] = bara_id; } __global__ void prepareForiFFT_1_Bit_stream(int *des, int *decaCoalesce, hipfftDoubleReal *d_rev_in, int nBits, int *bara, int baraIndex, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; // register int bitIndex = (id / N) % nBits; register int threadIdModN = id % N; register int a = bara[baraIndex];//[bitIndex * N + baraIndex]; register int aa = a - N; register bool L1 = a < N, L2 = threadIdModN < a, L3 = threadIdModN < aa; int source_id = source[id], source_id_a_N = source[id - a + N], source_id_a = source[id - a]; int source_id_aa = source[id - aa], source_id_aa_N = source[id - aa + N]; register int des_id = (L1 * (L2 * (-source_id_a_N - source_id) + (!L2) * (source_id_a - source_id)) + (!L1) * (L3 * (source_id_aa_N - source_id) + (!L3) * (-source_id_aa - source_id))); // des[id] = des_id; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t val = ((uint32_t)(des_id + offset)); register uint32_t temp1 = (val >> decal) & maskMod; register int xxxxx1 = (temp1 - halfBg); // decaCoalesce[((id / N) * N) + id] = xxxxx1; p = 1; decal = (32 - (p + 1) * Bgbit); val = ((uint32_t)(des_id + offset)); temp1 = (val >> decal) & maskMod; register int xxxxx2 = temp1 - halfBg;// + // decaCoalesce[((id / N) * N) + id + N] = xxxxx2; register int bIndex = id / N, tIndex = id % N;//, startIndexSmall = bIndex * N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= nBits) * nBits * N * 2; d_rev_in[destTod_rev_in] = xxxxx1/2.;//id;// d_rev_in[destTod_rev_in + 1024] = -xxxxx1/2.;//id;// destTod_rev_in += 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2/2.;//id; d_rev_in[destTod_rev_in + 1024] = -xxxxx2/2.;//id; } __global__ void prepareForFFT_1_Bit_Stream(hipfftDoubleComplex *cuDecaFFTCoalesce, hipfftDoubleComplex *tmpa_gpuCoal, hipfftDoubleComplex *d_in, hipfftDoubleComplex *d_rev_out, hipfftDoubleComplex *bki, int keyIndex, int nBits) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int tempId = id; int bitIndex = tempId/Ns2; register hipfftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register hipfftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register hipfftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register hipfftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; register int keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; hipfftDoubleComplex bki_aid = bki[aID]; hipfftDoubleComplex bki_bid = bki[bID]; hipfftDoubleComplex temp_a0 = cuCmul(v0, bki_aid); hipfftDoubleComplex temp_b0 = cuCmul(v0, bki_bid); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex temp_a1 = cuCmul(v1, bki_aid); hipfftDoubleComplex temp_b1 = cuCmul(v1, bki_bid); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex 
temp_a2 = cuCmul(v2, bki_aid); hipfftDoubleComplex temp_b2 = cuCmul(v2, bki_bid); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; hipfftDoubleComplex temp_a3 = cuCmul(v3, bki_aid); hipfftDoubleComplex temp_b3 = cuCmul(v3, bki_bid); hipfftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x +temp_a2.x +temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y +temp_a2.y +temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; hipfftDoubleComplex tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x +temp_b2.x +temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y +temp_b2.y +temp_b3.y; // tmpa_gpuCoal[nBits * Ns2 + id] = tmpa_gpuCoal1; register int largeSI = 0;//(id / Ns2) * (N + 1); // register int tid = id % Ns2; d_in[largeSI + 2 * id + 1] = tmpa_gpuCoal0; largeSI = (N + 1); d_in[largeSI + 2 * id + 1] = tmpa_gpuCoal1; } __global__ void finishUpFFT_1_Bit_Stream(int *temp2, hipfftDoubleReal *d_out, int *temp3) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register double _2p32 = double(INT64_C(1) << 32); register double _1sN = double(1) / double(N); register int bitIndex = id / N; register int tIndex = id % N; register int startIndexLarge = bitIndex * _2N; int temp3_id = temp3[id]; int temp2_id = Torus32(int64_t(d_out[startIndexLarge + tIndex] * _1sN * _2p32)) + temp3_id;// temp2[id] = temp2_id; } __global__ void extract_gpu_1_Bit_Stream(int *destination, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int s_id, des_id; register bool L1 = id == 0; s_id = L1 * id + (!L1) * (N - id); des_id = source[s_id]; des_id = L1 * des_id + (!L1) * (-des_id) + 32768; destination[id] = des_id; } __global__ void getAibarCoalesce_1_Bit_Stream(uint32_t *d_aibar, Torus32 *ai, int32_t prec_offset) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int ai_id = ai[id]; ai_id += prec_offset; d_aibar[id] = ai_id; } __global__ void calculateAijFromAibarCoalesce_1_Bit_Stream(uint32_t *aij, uint32_t *aibar, int t, int basebit, int mask) { int id = blockIdx.x * blockDim.x + threadIdx.x; int i = id / t; int j = id % t; int aibar_i = aibar[i]; aij[id] = (aibar_i >> (32 - (j + 1) * basebit)) & mask; } __global__ void lweKeySwitchVectorSubstraction_gpu_testing_coalesce_1_Bit_Stream(int *destinationA, Torus32 *sourceA, uint32_t *d_aij, int *destinationB, int *sourceB, int ks_n, int ks_t, int ks_base, int n, int params_n) { register int id = blockIdx.x * blockDim.x + threadIdx.x; int desB = destinationB[0]; register int desAid = destinationA[id]; register int A = ks_n, B = ks_t, C = ks_base, D = params_n; #pragma unroll for (int i = 0; i < 1024; ++i) {//n #pragma unroll for (int j = 0; j < 8; ++j) {//ks_t int sI2 = i * ks_t + j; register int aij = d_aij[sI2]; register int sa_id = sourceA[i * B * C * D + j * C * D + aij * D + id]; desAid -= sa_id; int bi = d_aij[sI2 + id]; int sb_id = sourceB[i * B * C + j * C + bi]; desB -= sb_id; } } destinationA[id] = desAid; if(id < 1) { destinationB[0] = desB; } } EXPORT void bootsAND_fullGPU_1_Bit_Stream(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, hipfftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 
AndConst = modSwitchToTorus32(-1, 8); int *temp_res_a, *temp_res_b; hipMalloc(&temp_res_a, n * nBits * sizeof(Torus32)); temp_res_b = new Torus32[nBits]; register int length = 500 * nBits; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); hipLaunchKernelGGL(( vecAdd), dim3(nBLOCKS), dim3(nTHREADS), 0, 0, temp_res_a, ca->a, cb->a, length); for (int i = 0; i < nBits; ++i) { temp_res_b[i] = ca->b[i] + cb->b[i] + AndConst; temp_res_b[i] = modSwitchFromTorus32(temp_res_b[i], _2N); } //create streams hipDeviceProp_t cProfile; hipGetDeviceProperties(&cProfile, 0); int nSM = cProfile.multiProcessorCount; cout << "#SM: " << nSM << endl; //20 hipStream_t streams[nSM]; for (int i = 0; i < nSM; ++i) {//nSM hipStreamCreateWithFlags(&streams[i], hipStreamNonBlocking); } //bootstrapping woks uptoFFT int *accum_a_b, *bara, *temp_accum_a_b;//, *barb, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate hipMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int)); hipMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int)); hipMalloc(&bara, nBits * N * sizeof(int)); hipMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); hipMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); hipMemset(bara, 0, nBits * N * sizeof(int)); for (int i = 0; i < nBits; ++i) { int sI = i * 1024 * (k + 1); int si = i * 1024; hipLaunchKernelGGL(( bootstrappingUptoBlindRotate_1_Bit_stream), dim3(1), dim3(nTHREADS), 0, streams[i % 20], accum_a_b + sI + 1024, NULL,//temp_accum_a_b + sI, bara + si, NULL, MU, 1, temp_res_a + i * 500, temp_res_b[i]); } for (int i = 0; i < 20; ++i) { hipStreamSynchronize(streams[i]); } int *decaCoalesce; hipMalloc(&decaCoalesce, nBits * N * kpl * sizeof(int));//1024*4 hipfftDoubleComplex *cuDecaFFTCoalesce; hipMalloc(&cuDecaFFTCoalesce, nBits * kpl * Ns2 * sizeof(hipfftDoubleComplex));//512*4 hipfftDoubleComplex *tmpa_gpuCoal; hipMalloc(&tmpa_gpuCoal, nBits * Ns2 * sizeof(hipfftDoubleComplex) * (k + 1));//512*2 //fft variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 //cufft helper variables hipfftDoubleReal* d_rev_in; hipfftDoubleComplex *d_rev_out; hipfftDoubleComplex *d_in; hipfftDoubleReal *d_out; //cufft plans hipfftHandle p; hipfftHandle rev_p; //ifft variables allocation hipMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(hipfftDoubleReal)); hipMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(hipfftDoubleComplex)); hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, iFFTBatch);// - (iFFTBatch / dParts)); hipMemset(d_rev_in, 0, iFFTBatch * _2N * sizeof(hipfftDoubleReal)); //fft variables allocation hipMalloc(&d_in, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex)); hipMalloc(&d_out, FFTBatch * _2N * sizeof(hipfftDoubleReal)); hipfftPlan1d(&p, _2N, HIPFFT_Z2D, FFTBatch); hipMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(hipfftDoubleComplex)); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; for (int j = 0; j < 500; ++j) { // nBLOCKS = nBits * (k + 1);//as accum is of (k + 1) * 1024; for (int i = 0; i < nBits; ++i) { int tLweSampleStart = i * (k + 1) * N; int baraStart = i * N; int dCoalesceStart = i * kpl * N; int d_rev_inStart = i * kpl * _2N; hipLaunchKernelGGL(( prepareForiFFT_1_Bit_stream), dim3(2), dim3(nTHREADS), 0, streams[i % nSM], temp2 + tLweSampleStart, decaCoalesce + dCoalesceStart, d_rev_in + d_rev_inStart, 1,//nBits, bara + baraStart, j, temp3 + tLweSampleStart); } for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } hipfftExecD2Z(rev_p, d_rev_in, d_rev_out); hipDeviceSynchronize(); for (int i = 0; i < nBits; ++i) { int 
cuDecaFFTCoalesceStart = i * kpl * Ns2; int d_rev_outStart = i * kpl * (N + 1); int tmpa_gpuCoalStart = i * Ns2 * (k + 1); int d_inStart = i * (N + 1) * (k + 1); hipLaunchKernelGGL(( prepareForFFT_1_Bit_Stream), dim3(1), dim3(512), 0, streams[i % nSM], cuDecaFFTCoalesce + cuDecaFFTCoalesceStart, tmpa_gpuCoal + tmpa_gpuCoalStart, d_in + d_inStart, d_rev_out + d_rev_outStart, cudaBkFFTCoalesceExt, j, 1); } for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } hipfftExecZ2D(p, d_in, d_out); hipDeviceSynchronize(); for (int i = 0; i < nBits; ++i) { int tlweSampleStart = i * (k + 1) * N; int d_outStart = i * (k + 1) * _2N; hipLaunchKernelGGL(( finishUpFFT_1_Bit_Stream), dim3(2), dim3(nTHREADS), 0, streams[i % nSM], temp2 + tlweSampleStart, d_out + d_outStart, temp3 + tlweSampleStart); } for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } swap(temp2, temp3); } //extract int *u_a, *u_b, *temp_u_b; hipMalloc(&u_a, nBits * N * sizeof(int)); u_b = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; hipMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H); for (int i = 0; i < nBits; ++i) { int accum_a_bStart = i * N * (k + 1); int u_aStart = i * N; hipLaunchKernelGGL(( extract_gpu_1_Bit_Stream), dim3(1), dim3(1024), 0, streams[i % nSM], u_a + u_aStart, accum_a_b + accum_a_bStart); u_b[i] = temp_u_b[accum_a_bStart + N]; } int *result_b_gpu; hipMalloc(&result_b_gpu, nBits * sizeof(int)); hipMemset(result->a, 0, nBits * 500 * sizeof(int)); hipMemcpy(result_b_gpu, u_b, nBits * sizeof(int), H2D); for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } //key switch const int ks_n = 1024, ks_basebit = 2, ks_t = 8, ks_out_params_n = 500; const int base = 1 << ks_basebit;// base=2 in [CGGI16] const int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const int mask = base - 1; /* // int coal_d_aibarSize = nBits * ks_n;//16*1024 // uint32_t *coal_d_aibar; // hipMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t)); // // for (int i = 0; i < nBits; ++i) { // int coal_d_aibarStart = i * N; // int u_aStart = i * N; // getAibarCoalesce_1_Bit_Stream<<<1, 1024, 0, streams[i % nSM]>>> // (coal_d_aibar + coal_d_aibarStart, // u_a + u_aStart, // prec_offset); // } // // for (int i = 0; i < nSM; ++i) { // hipStreamSynchronize(streams[i]); // }*/ int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; hipMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t)); for (int i = 0; i < nBits; ++i) { int coal_d_aijStart = i * ks_n * ks_t; // int coal_d_aibarStart = i * ks_n; int u_aStart = i * ks_n;//1024 hipLaunchKernelGGL(( calculateAijFromAibarCoalesce_1_Bit_Stream), dim3(8), dim3(1024), 0, streams[i % nSM], coal_d_aij + coal_d_aijStart, (uint32_t*)u_a + u_aStart, ks_t, ks_basebit, mask); } for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } for (int i = 0; i < nBits; ++i) { int res_aStart = i * 500; int coal_d_aijStart = i * ks_n * ks_t; int result_b_gpuStart = i; hipLaunchKernelGGL(( lweKeySwitchVectorSubstraction_gpu_testing_coalesce_1_Bit_Stream), dim3(1), dim3(500), 0, streams[i % nSM], result->a + res_aStart, ks_a_gpu_extendedPtr, coal_d_aij + coal_d_aijStart, result_b_gpu + i, ks_b_gpu_extendedPtr, ks_n, ks_t, base, 1024, 500); } for (int i = 0; i < nSM; ++i) { hipStreamSynchronize(streams[i]); } hipMemcpy(result->b, result_b_gpu, nBits * sizeof(int), D2H); // int *h_res_a = new int[nBits * 500]; // hipMemcpy(h_res_a, result->a, nBits * 500 * sizeof(int), D2H); // for (int i = 0; i < nBits; ++i) { // int sI = i * 
500; // for (int j = 0; j < 10; ++j) { // cout << h_res_a[sI + j] << " "; // } // cout << endl; // } // cout << endl; delete [] temp_res_b; hipFree(temp_res_a); hipFree(accum_a_b); hipFree(temp_accum_a_b); hipFree(bara); // hipFree(barb); // hipFree(testvectbis); hipFree(decaCoalesce); hipFree(cuDecaFFTCoalesce); hipFree(tmpa_gpuCoal); //cufft helper variables hipFree(d_rev_in); hipFree(d_rev_out); hipFree(d_in); hipFree(d_out); hipfftDestroy(rev_p); hipfftDestroy(p); hipFree(u_a); delete [] u_b; delete [] temp_u_b; hipFree(result_b_gpu); // hipFree(coal_d_aibar); for (int i = 0; i < nSM; ++i) { //nSM hipStreamDestroy(streams[i]); } }
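// ---------------------------------------------------------------------------
// Minimal host-side usage sketch for the batched AND gate above. Illustrative
// only (the function name is hypothetical and is not called anywhere): it
// assumes convertBitToNumberZero_GPU / freeLweSample_16 (the LweSample_16
// allocators defined elsewhere in this file) are available, and that the
// precomputed bootstrapping key (cudaBkFFTCoalesceExt) and key-switching
// buffers (ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr) have been prepared by
// the caller, exactly as bootsAND_fullGPU_n_Bit expects.
void example_bootsAND_fullGPU_usage(const LweSample_16 *ca,
                                    const LweSample_16 *cb,
                                    int nBits,
                                    const TFheGateBootstrappingCloudKeySet *bk,
                                    hipfftDoubleComplex *cudaBkFFTCoalesceExt,
                                    Torus32 *ks_a_gpu_extendedPtr,
                                    Torus32 *ks_b_gpu_extendedPtr) {
    // result->a is a device buffer of nBits * 500 Torus32 coefficients;
    // result->b and result->current_variance live on the host.
    LweSample_16 *result = convertBitToNumberZero_GPU(nBits, bk);
    bootsAND_fullGPU_n_Bit(result, ca, cb, nBits, cudaBkFFTCoalesceExt,
                           ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr);
    // Copy the mask back to the host if it is needed there (e.g. for decryption).
    int *h_a = new int[nBits * 500];
    CudaSafeCall(hipMemcpy(h_a, result->a, nBits * 500 * sizeof(int), D2H));
    // ... use h_a together with result->b ...
    delete [] h_a;
    // Same cleanup pattern as the gate wrappers in this file.
    hipFree(result->a);
    result->a = NULL;
    freeLweSample_16(result);
}
// ---------------------------------------------------------------------------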
b8303d42b6c09bcde279f8b6e7dd1154df137145.cu
#ifndef TFHE_TEST_ENVIRONMENT #include <cstdlib> #include <iostream> #include <random> #include <cassert> #include "tfhe_core.h" #include "numeric_functions.h" #include "lweparams.h" #include "lwekey.h" #include "lwesamples.h" #include "lwekeyswitch.h" #include "lwe-functions.h" #include "lwebootstrappingkey.h" #include "tfhe.h" #include <fstream> #include <cstdint> using namespace std; #define H2D cudaMemcpyHostToDevice #define D2D cudaMemcpyDeviceToDevice #define D2H cudaMemcpyDeviceToHost #else #undef EXPORT #define EXPORT static #endif #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) //*//***************************************** // zones on the torus -> to see //*//***************************************** /* * Homomorphic bootstrapped NAND gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNAND(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) - ca - cb static const Torus32 NandConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, NandConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) + ca + cb static const Torus32 OrConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrConst, in_out_params); lweAddTo(temp_result, ca, in_out_params); 
lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AND gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsAND(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweAddTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << temp_result->a[i] << " "; // } // cout << endl; //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << result->a[i] << " "; // } // cout << result->b; // cout << endl; delete_LweSample(temp_result); } /* * Homomorphic bootstrapped XOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsXOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); lweNoiselessTrivial(temp_result, XorConst, in_out_params); lweAddMulTo(temp_result, 2, ca, in_out_params); lweAddMulTo(temp_result, 2, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped XNOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsXNOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/4) + 2*(-ca-cb) static const Torus32 XnorConst = modSwitchToTorus32(-1, 4); lweNoiselessTrivial(temp_result, XnorConst, in_out_params); lweSubMulTo(temp_result, 2, ca, in_out_params); lweSubMulTo(temp_result, 2, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped NOT gate (doesn't need to be bootstrapped) * Takes in input 1 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNOT(LweSample *result, const LweSample *ca, const 
TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; lweNegate(result, ca, in_out_params); } /* * Homomorphic bootstrapped COPY gate (doesn't need to be bootstrapped) * Takes in input 1 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsCOPY(LweSample *result, const LweSample *ca, const TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; lweCopy(result, ca, in_out_params); } /* * Homomorphic Trivial Constant gate (doesn't need to be bootstrapped) * Takes a boolean value) * Outputs a LWE sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsCONSTANT(LweSample *result, int value, const TFheGateBootstrappingCloudKeySet *bk) { const LweParams *in_out_params = bk->params->in_out_params; static const Torus32 MU = modSwitchToTorus32(1, 8); lweNoiselessTrivial(result, value ? MU : -MU, in_out_params); } /* * Homomorphic bootstrapped NOR gate * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsNOR(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) - ca - cb static const Torus32 NorConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, NorConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AndNY Gate: not(a) and b * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsANDNY(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) - ca + cb static const Torus32 AndNYConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndNYConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped AndYN Gate: a and not(b) * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsANDYN(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca - cb static const Torus32 AndYNConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndYNConst, in_out_params); lweAddTo(temp_result, ca, 
in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OrNY Gate: not(a) or b * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsORNY(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) - ca + cb static const Torus32 OrNYConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrNYConst, in_out_params); lweSubTo(temp_result, ca, in_out_params); lweAddTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped OrYN Gate: a or not(b) * Takes in input 2 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsORYN(LweSample *result, const LweSample *ca, const LweSample *cb, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,1/8) + ca - cb static const Torus32 OrYNConst = modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result, OrYNConst, in_out_params); lweAddTo(temp_result, ca, in_out_params); lweSubTo(temp_result, cb, in_out_params); //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); delete_LweSample(temp_result); } /* * Homomorphic bootstrapped Mux(a,b,c) = a?b:c = a*b + not(a)*c * Takes in input 3 LWE samples (with message space [-1/8,1/8], noise<1/16) * Outputs a LWE bootstrapped sample (with message space [-1/8,1/8], noise<1/16) */ EXPORT void bootsMUX(LweSample *result, const LweSample *a, const LweSample *b, const LweSample *c, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const LweParams *extracted_params = &bk->params->tgsw_params->tlwe_params->extracted_lweparams; LweSample *temp_result = new_LweSample(in_out_params); LweSample *temp_result1 = new_LweSample(extracted_params); LweSample *u1 = new_LweSample(extracted_params); LweSample *u2 = new_LweSample(extracted_params); //compute "AND(a,b)": (0,-1/8) + a + b static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweAddTo(temp_result, a, in_out_params); lweAddTo(temp_result, b, in_out_params); // Bootstrap without KeySwitch tfhe_bootstrap_woKS_FFT(u1, bk->bkFFT, MU, temp_result); //compute "AND(not(a),c)": (0,-1/8) - a + c lweNoiselessTrivial(temp_result, AndConst, in_out_params); lweSubTo(temp_result, a, in_out_params); lweAddTo(temp_result, c, in_out_params); // Bootstrap without KeySwitch tfhe_bootstrap_woKS_FFT(u2, bk->bkFFT, MU, temp_result); // Add u1=u1+u2 static const Torus32 MuxConst = 
modSwitchToTorus32(1, 8); lweNoiselessTrivial(temp_result1, MuxConst, extracted_params); lweAddTo(temp_result1, u1, extracted_params); lweAddTo(temp_result1, u2, extracted_params); // Key switching lweKeySwitch(result, bk->bkFFT->ks, temp_result1); delete_LweSample(u2); delete_LweSample(u1); delete_LweSample(temp_result1); delete_LweSample(temp_result); } /////new for gpu EXPORT LweSample_16* convertBitToNumberZero(int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16* temp = (LweSample_16 *)malloc(sizeof(LweSample_16)); temp->a = (int*)calloc(bitSize*polySize, sizeof(int)); temp->b = (int*)calloc(bitSize, sizeof(int)); temp->current_variance = (double*)calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 *convertBitToNumberZero_GPU(int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); cudaMalloc(&(temp->a), bitSize * polySize * sizeof(int)); temp->b = (int *) calloc(bitSize, sizeof(int)); //testing start static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < bitSize; ++i) { temp->b[i] = -MU; } // testing end temp->current_variance = (double *) calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 *convertBitToNumberZero_GPU_2(int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); cudaMalloc(&(temp->a), nOutputs * bitSize * polySize * sizeof(int)); temp->b = (int *) calloc(nOutputs * bitSize, sizeof(int)); temp->current_variance = (double *) calloc(nOutputs * bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 * newLweSample_16(int bitSize, const LweParams *params) { int polySize = params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); temp->a = (int *) calloc(bitSize * polySize, sizeof(int)); temp->b = (int *) calloc(bitSize, sizeof(int)); temp->current_variance = (double *) calloc(bitSize, sizeof(double)); return temp; } EXPORT LweSample_16 * newLweSample_16_2(int nOutputs, int bitSize, const LweParams *params) { int polySize = params->n; LweSample_16 *temp = (LweSample_16 *) malloc(sizeof(LweSample_16)); temp->a = (int *) calloc(nOutputs * bitSize * polySize, sizeof(int)); temp->b = (int *) calloc(nOutputs * bitSize, sizeof(int)); temp->current_variance = (double *) calloc(nOutputs * bitSize, sizeof(double)); return temp; } EXPORT LweSample_16* convertBitToNumber(const LweSample* input, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { int polySize = bk->params->in_out_params->n; LweSample_16* temp = (LweSample_16 *)malloc(sizeof(LweSample_16)); temp->a = (int*)malloc(sizeof(int)*bitSize*polySize); temp->b = (int*)malloc(sizeof(int)*bitSize); temp->current_variance = (double*)malloc(sizeof(double)*bitSize); for (int i = 0; i < bitSize; ++i) { for (int j = 0; j < polySize; ++j) { temp->a[i * polySize + j] = (int)input[i].a[j]; } temp->b[i] = input[i].b; temp->current_variance[i] = input[i].current_variance; } return temp; } EXPORT LweSample* convertNumberToBits(LweSample_16* number, int bitSize, const TFheGateBootstrappingCloudKeySet *bk) { LweSample *tempCiphertext = new_gate_bootstrapping_ciphertext_array(bitSize, bk->params); const int n = bk->params->in_out_params->n; for (int i = 0; i < bitSize; ++i) { int startIndex = i * n; for (int j = 0; j < n; ++j) { tempCiphertext[i].a[j] = 
number->a[startIndex + j]; } tempCiphertext[i].b = number->b[i]; tempCiphertext[i].current_variance = number->current_variance[i]; } return tempCiphertext; } EXPORT void freeLweSample_16(LweSample_16* input) { free(input->a); free(input->b); free(input->current_variance); free(input); } int* allocateAndCopyIntVectorFromHostToDevice(int *source, int len) { int *d_temp; int bytes = len * sizeof(int); cudaMalloc(&d_temp, bytes); cudaMemcpy(d_temp, source, bytes, cudaMemcpyHostToDevice); return d_temp; } int* allocateAndCopyIntVectorFromDeviceToHost(int *d_source, int len) { int bytes = len * sizeof(int); int *temp = (int*)malloc(bytes); cudaMemcpy(temp, d_source, bytes, cudaMemcpyDeviceToHost); return temp; } __global__ void vecAdd(int *result, int *a, int *b, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = a[id] + b[id]; } } __global__ void vecAddMulTo(int *result, int mulVal, int *a, int *b, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = (mulVal * (a[id] + b[id])); } } void sendLweSmaple_16_a_ToGPU(LweSample_16 *sample, int bitSize, int polySize) { int *temp = sample->a; int byteLength = bitSize * polySize * sizeof(int); cudaMalloc(&(sample->a), byteLength); cudaMemcpy(sample->a, temp, byteLength, cudaMemcpyHostToDevice); } EXPORT void bootsAND_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; int BLOCKSIZE = in_out_params->n; int gridSize = (int) ceil((float) (in_out_params->n * bitSize) / BLOCKSIZE); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize, bk); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = AndConst; } vecAdd<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, in_out_params->n * bitSize); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] += (ca->b[i] + cb->b[i]); // cout << temp_result->b[i] << " "; // temp_result->current_variance[i] += (ca->current_variance[i] + cb->current_variance[i]); } //test start // cout << "Inside AND:" << endl; // int *tempaa = new int[in_out_params->n * bitSize]; //////// int *tempba = new int[in_out_params->n * bitSize]; // cudaMemcpy(tempaa, temp_result->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); ////////// cudaMemcpy(tempba, cb->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; //// cout << "ca: "; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; //// cout << "cb: "; //// for (int j = 0; j < 10; ++j) { //// cout << tempba[sI + j] << " "; //// } // cout << temp_result->b[i] << " "; // cout << endl; // } // cout << endl; //test end //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // 
assert(bitSize%2 == 0); // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, 1, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // int *temp = new int[in_out_params->n * bitSize]; // cudaMemcpy(temp, result->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; //// cout << "new: "; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i]; //// cout << endl; // } cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } EXPORT void bootsXOR_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; int BLOCKSIZE = in_out_params->n; int gridSize = (int) ceil((float) (in_out_params->n * bitSize) / BLOCKSIZE); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize, bk); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = XorConst; } int mulVal = 2; vecAddMulTo<<<gridSize, BLOCKSIZE>>>(temp_result->a, mulVal, ca->a, cb->a, in_out_params->n * bitSize); for (int i = 0; i < bitSize; ++i) { temp_result->b[i] += (mulVal * (ca->b[i] + cb->b[i])); temp_result->current_variance[i] += ((mulVal * mulVal) * (ca->current_variance[i] + cb->current_variance[i])); } //test start // cout << "Inside xor: " << endl; // int *tempaa = new int[in_out_params->n * bitSize]; // cudaMemcpy(tempaa, temp_result->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // // cudaMemcpy(tempba, cb->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; // cout << "a: "; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } //// cout << temp_result->b[i] << " "; // cout << endl; // } // cout << endl; // cout << endl; //test end //if the phase is positive, the result is 1/8 //if the phase is positive, else the result is -1/8 tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[in_out_params->n * bitSize]; // cudaMemcpy(temp, result->a, in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n; // cout << "new: "; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << result->b[i]; // cout << endl; // } cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void ANDXORvecMulAllto(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = (id / (n * bitSize)) + 1; destination[id] = (mulVal * (ca[id % (n * bitSize)] + cb[id % (n * bitSize)])); } } EXPORT void 
bootsANDXOR_16(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, bitSize, bk); //compute temp_result->a int BLOCKSIZE = in_out_params->n; int length = in_out_params->n * bitSize * nOutputs; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); // cout << "gridSize " << gridSize << endl; ANDXORvecMulAllto<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, in_out_params->n, bitSize, length); //compute temp_result->b for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_result->b[i + bitSize] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and temp_result->current_variance[i + bitSize] = (mulValXor * mulValXor) * (ca->current_variance[i] + cb->current_variance[i]);// for xor } /*//test start // cout << "Inside AND:" << endl; // int *tempaa = new int[in_out_params->n * bitSize * nOutputs]; // cudaMemcpy(tempaa, temp_result->a, nOutputs * in_out_params->n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int sI = i * in_out_params->n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "Inside XOR:" << endl; // for (int i = 0; i < bitSize; ++i) { // int sI = (bitSize + i) * in_out_params->n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl;*/ // cout << "compute temp_result->b" << endl; // cout << "total: " << nOutputs * bitSize << endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[length]; // cudaMemcpy(temp, result->a, length * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < nOutputs * bitSize; ++i) { // int sI = i * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i] << " " << result->current_variance[i] << endl; // } // cout << endl; // cout << "I am inside the function" << endl; cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void XORXORvecMulAllto(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = 2; destination[id] = (mulVal * (ca[id % (n * bitSize)] + cb[id % (n * bitSize)])); } 
} EXPORT void bootsXORXOR_16(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 *cb2, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2, n = in_out_params->n; //compute temp_result->a int BLOCKSIZE = n; int length = n * bitSize; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); // cout << "bitSize: " << bitSize<< endl; // cout << "length: " << length << endl; // cout << "nOut: " << nOutputs << endl; // cout << "gridSize: " << gridSize << endl; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, bitSize, bk); //compute temp_result->a XORXORvecMulAllto<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca1->a, ca2->a, n, bitSize, length); XORXORvecMulAllto<<<gridSize, BLOCKSIZE>>>(temp_result->a + n, cb1->a, cb2->a, n, bitSize, length); //compute temp_result->b for (int i = 0; i < bitSize; ++i) { temp_result->b[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_result->b[i + bitSize] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor temp_result->current_variance[i] = (mulValXor * mulValXor) * (ca1->current_variance[i] + ca2->current_variance[i]); //for and temp_result->current_variance[i + bitSize] = (mulValXor * mulValXor) * (cb1->current_variance[i] + cb2->current_variance[i]);// for xor } // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void XORXORvecMulAllto_vector(int *destination, int *ca, int *cb, int n, int bitSize, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = 2; destination[id] = (mulVal * (ca[id] + cb[id])); } } EXPORT void bootsXORXOR_16_vector(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 *cb2, int vLength, int nOutputs, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32* ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2, n = in_out_params->n; int totalBitSize = vLength * bitSize; //compute temp_result->a int BLOCKSIZE = n; int length = n * totalBitSize;//svLength * bitSize; int gridSize = (int) ceil((float) (length) 
/ BLOCKSIZE); // cout << "vLen: " << vLength << endl; // cout << "bitSize: " << bitSize<< endl; // cout << "length: " << length << endl; // cout << "nOut: " << nOutputs << endl; // cout << "gridSize: " << gridSize << endl; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs * vLength, bitSize, bk); //compute temp_result->a XORXORvecMulAllto_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca1->a, ca2->a, n, bitSize, length); XORXORvecMulAllto_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a + n * vLength, cb1->a, cb2->a, n, bitSize, length); //compute temp_result->b for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_result->b[i + totalBitSize] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor temp_result->current_variance[i] = (mulValXor * mulValXor) * (ca1->current_variance[i] + ca2->current_variance[i]); //for and temp_result->current_variance[i + totalBitSize] = (mulValXor * mulValXor) * (cb1->current_variance[i] + cb2->current_variance[i]);// for xor } // cout << "HEREZZZZZZZZ----" << endl; // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, bitSize * vLength, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize * nOutputs * vLength, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void ANDXORvecMulAllto_vector(int *destination, int *ca, int *cb, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { int mulVal = (id / (vLength * bitSize * n)) + 1; destination[id] = (mulVal * (ca[id % (vLength * bitSize * n)] + cb[id % (vLength * bitSize * n)])); } } EXPORT void bootsANDXOR_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const int n = in_out_params->n;//500 //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); int BLOCKSIZE = 1024; int length = vLength * bitSize * nOutputs * n; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); ANDXORvecMulAllto_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + 
AndConst; //for and temp_result->b[i + totalBitSize] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and temp_result->current_variance[i + totalBitSize] = (mulValXor * mulValXor) * (ca->current_variance[i] + cb->current_variance[i]);// for xor } //test start // cout << "Inside AND:" << endl; // int *tempaa = new int[n * bitSize * nOutputs * vLength]; // cudaMemcpy(tempaa, temp_result->a, vLength * nOutputs * n * bitSize * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize * vLength; ++i) { // int sI = i * n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "Inside XOR:" << endl; // for (int i = 0; i < bitSize * vLength; ++i) { // int sI = (bitSize * vLength + i) * n ; // for (int j = 0; j < 10; ++j) { // cout << tempaa[sI + j] << " "; // } // cout << endl; // } // cout << endl; // cout << "HEREZZZZZZZZZZZ" <bootsAND_fullGPU_OneBit< endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, nOutputs * vLength * bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // tfhe_bootstrap_FFT_16_2(result, bk->bkFFT, MU, nOutputs, vLength * bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // cout << "HEREZZZZZZZZZZZ--" << endl; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // int *temp = new int[length]; // cudaMemcpy(temp, result->a, length * sizeof(int), cudaMemcpyDeviceToHost); // cout << "AND PART" << endl; // for (int i = 0; i < 16 * bitSize; ++i) { // int sI = i * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[i] << " " << result->current_variance[i] << endl; // } // cout << endl; // cout << endl << "XOR PART" << endl; // for (int i = 0; i < 16 * bitSize; ++i) { // int sIB = bitSize * vLength ; // int sI = i * 500 + bitSize * vLength * 500; // for (int j = 0; j < 10; ++j) { // cout << temp[sI + j] << " "; // } // cout << endl; //// cout << result->b[sIB + i] << " " << result->current_variance[sI + i] << endl; // } // cout << endl; cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } //only used for multiplication __global__ void vecAdd_MULT(int *result, int *a, int *b, int bAStart, int n, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { result[id] = a[id] + b[(id % n) + bAStart]; } } EXPORT void bootsAND_MULT(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int resBitSize, int bitSize_A, int bIndex, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { assert(bitSize_A == resBitSize); static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; int n = in_out_params->n; int BLOCKSIZE = 1024; int gridSize = (int) ceil((float) 
(in_out_params->n * bitSize_A) / BLOCKSIZE);
    int bAstartIndex = bIndex * n;
    //compute: (0,-1/8) + ca + cb
    static const Torus32 AndConst = modSwitchToTorus32(-1, 8);
    LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize_A, bk);
    for (int i = 0; i < bitSize_A; ++i) {
        temp_result->b[i] = AndConst;
    }
    vecAdd_MULT<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, bAstartIndex, n, n * bitSize_A);
    for (int i = 0; i < bitSize_A; ++i) {
        temp_result->b[i] += (ca->b[i] + cb->b[bIndex]);
        temp_result->current_variance[i] += (ca->current_variance[i] + cb->current_variance[bIndex]);
    }
    int bitSize = bitSize_A;
    tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce,
                          ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu,
                          ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr);
    //quick-and-dirty workaround
    //find out later on
    // cudaMemset(result->a + (n * bitSize_A), 0, n * (resBitSize - bitSize_A) * sizeof(int));
    // for (int i = bitSize_A; i < resBitSize; ++i) {
    //     cout << result->b[i] << " ";
    // }
    // cout << endl;
    cudaFree(temp_result->a);
    temp_result->a = NULL;
    freeLweSample_16(temp_result);
}

EXPORT void bootsAND_MULT_con(LweSample_16 *result, LweSample_16 **ca, LweSample_16 **cb, int nConMul,
                              int resBitSize, int bitSize_A, int bIndex,
                              const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT,
                              cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu,
                              Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu,
                              Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr,
                              double *ks_cv_gpu_extendedPtr) {
    assert(bitSize_A == resBitSize);
    static const Torus32 MU = modSwitchToTorus32(1, 8);
    const LweParams *in_out_params = bk->params->in_out_params;
    int n = in_out_params->n;
    int BLOCKSIZE = n;
    int gridSize = (int) ceil((float) (n * bitSize_A) / BLOCKSIZE);
    int bAstartIndex = bIndex * n;
    //compute: (0,-1/8) + ca + cb
    static const Torus32 AndConst = modSwitchToTorus32(-1, 8);
    LweSample_16 *temp_result = convertBitToNumberZero_GPU(bitSize_A * nConMul, bk);
    // for (int i = 0; i < bitSize_A; ++i) {
    //     temp_result->b[i] = AndConst;
    // }
    for (int i = 0; i < nConMul; ++i) {
        vecAdd_MULT<<<gridSize, BLOCKSIZE>>>(temp_result->a + i * bitSize_A * n, ca[i]->a, cb[i]->a,
                                             bAstartIndex, n, n * bitSize_A);
    }
    for (int j = 0; j < nConMul; ++j) {
        int sI = j * bitSize_A;
        for (int i = 0; i < bitSize_A; ++i) {
            int sI2 = sI + i;
            temp_result->b[sI2] = (ca[j]->b[i] + cb[j]->b[bIndex]) + AndConst;
            temp_result->current_variance[sI2] = ca[j]->current_variance[i] + cb[j]->current_variance[bIndex];
        }
    }
    int totalBitSize = bitSize_A * nConMul;
    // cout << "totalBitSize:" << totalBitSize << endl;
    // int nOutputs = 2;
    // int vLength = nConMul/2;
    // int bitSize = bitSize_A;
    tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, totalBitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce,
                          ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu,
                          ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr);
    // cout << "bootsAND_MULT_con: bitSize: " << bitSize << " vLen: " << vLength << endl;
    // if (nConMul % 2 == 1) {
    //     cout << "ERROR: Provide even number of vector" << endl;
    //     exit(1);
    // }
    // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT,
    //                                cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu,
    //                                ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr);
    cudaFree(temp_result->a);
    temp_result->a = NULL;
    freeLweSample_16(temp_result);
}

//(a xor b) and c
EXPORT void bootsXOR_AND(LweSample *result, const LweSample *ca, const
LweSample *cb, const LweSample *cc, const TFheGateBootstrappingCloudKeySet *bk) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; LweSample *temp_result = new_LweSample(in_out_params); LweSample *temp_result1 = new_LweSample(in_out_params); LweSample *temp_result2 = new_LweSample(in_out_params); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const Torus32 AndConst = modSwitchToTorus32(-1, 8); lweNoiselessTrivial(temp_result, XorConst, in_out_params); lweAddMulTo(temp_result, 2, ca, in_out_params); lweAddMulTo(temp_result, 2, cb, in_out_params); tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result); lweNoiselessTrivial(temp_result2, AndConst, in_out_params); lweAddTo(temp_result2, cc, in_out_params); lweAddTo(temp_result2, result, in_out_params); // static const Torus32 MU = modSwitchToTorus32(1, 8); // const LweParams *in_out_params = bk->params->in_out_params; // LweSample *temp_result = new_LweSample(in_out_params); //compute: (0,-1/8) + ca + cb // lweAddTo(temp_result, cb, in_out_params); tfhe_bootstrap_FFT(result, bk->bkFFT, MU, temp_result2); delete_LweSample(temp_result); } __global__ void reverseLweSample(int *dest, int *source, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { dest[id] = -source[id]; } } void bootsNOT_16(LweSample_16 *output, LweSample_16 *input, int bitSize, int params_n) { int length = bitSize * params_n, BLOCKSIZE = 1024, gridSize = (int) ceil((float) (length) / BLOCKSIZE); reverseLweSample<<<gridSize, BLOCKSIZE>>>(output->a, input->a, length); for (int i = 0; i < bitSize; ++i) { output->b[i] = -input->b[i]; output->current_variance[i] = input->current_variance[i]; } } //add vector __global__ void ANDvec_vector(int *destination, int *ca, int *cb, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { destination[id] = ca[id] + cb[id]; } } EXPORT void bootsAND_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nOutputs, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { assert(nOutputs == 1); static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const int n = in_out_params->n;//500 //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); int BLOCKSIZE = 1024; int length = vLength * bitSize * nOutputs * n; int gridSize = (int) ceil((float) (length) / BLOCKSIZE); ANDvec_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; //for and } // cout << "xxxxxxxxxxxxxxxxxxxx" << endl; // cout << nOutputs << endl; tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, vLength * bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, 
ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // if (vLength % 2 == 1 && vLength < 2) { //// cout << "vLen: " << vLength << " bitSize: " << bitSize << endl; // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); //// bitSize = bitSize/2; //// tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, 2, 2, bitSize/4, temp_result, cudaBkFFT, //// cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, //// ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // } else { // nOutputs = 2; // vLength = vLength / 2; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // } cudaFree(temp_result->a); temp_result->a = NULL; freeLweSample_16(temp_result); } __global__ void SUBvec_vector(int *destination, int *ca, int *cc, int vLength, int bitSize, int n, int length) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < length) { destination[id] = cc[id] - ca[id]; } } EXPORT void bootsMUX_16_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const LweSample_16 *cc, int vLength, int bitSize, const TFheGateBootstrappingCloudKeySet *bk, cufftDoubleComplex ****cudaBkFFT, cufftDoubleComplex ***cudaBkFFTCoalesce, Torus32 ****ks_a_gpu, Torus32 ****ks_a_gpu_extended, int ***ks_b_gpu, double ***ks_cv_gpu, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { static const Torus32 MU = modSwitchToTorus32(1, 8); const LweParams *in_out_params = bk->params->in_out_params; const LweParams *extracted_params = &bk->params->tgsw_params->tlwe_params->extracted_lweparams; const int n = in_out_params->n;//500 const int extracted_n = extracted_params->n;//1024 int nOutputs = 2; //for now vLength = 1 assert(vLength == 1); // cout << "n: " << n << endl; // cout << "nOutputs: " << nOutputs << endl; // cout << "vLength: " << vLength << endl; // cout << "extracted_n: " << extracted_n << endl; int ex_length = vLength * bitSize * extracted_n;//ex_length does not include nOutputs int length = vLength * bitSize * n;//length does not include nOutputs int BLOCKSIZE = 1024; LweSample_16 *temp_result = convertBitToNumberZero_GPU_2(nOutputs, vLength * bitSize, bk); LweSample_16 *u = newLweSample_16_2(nOutputs, vLength * bitSize, extracted_params); LweSample_16 *ex_temp_result = newLweSample_16_2(1, vLength * bitSize, extracted_params); free(u->a); free(ex_temp_result->a); cudaMalloc(&(u->a), ex_length * nOutputs * sizeof(int)); cudaMalloc(&(ex_temp_result->a), ex_length * sizeof(int)); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); static const Torus32 MuxConst = modSwitchToTorus32(1, 8); int gridSize = (int) ceil((float) (length) / BLOCKSIZE); ANDvec_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a, ca->a, cb->a, vLength, bitSize, n, length); SUBvec_vector<<<gridSize, BLOCKSIZE>>>(temp_result->a + length, ca->a, cc->a, vLength, bitSize, n, length); //compute temp_result->b int totalBitSize = vLength * bitSize; for (int i = 0; i < totalBitSize; ++i) { temp_result->b[i] = ca->b[i] + cb->b[i] + AndConst; temp_result->current_variance[i] = ca->current_variance[i] + cb->current_variance[i]; temp_result->b[i + totalBitSize] = - ca->b[i] + 
cc->b[i] + AndConst; temp_result->current_variance[i + totalBitSize] = - ca->current_variance[i] + cc->current_variance[i]; //for and } tfhe_bootstrap_woKS_FFT_16(u, bk->bkFFT, MU, vLength*nOutputs*bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce); // tfhe_bootstrap_woKS_FFT_16_2_vector(u, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, cudaBkFFTCoalesce); // tfhe_bootstrap_FFT_16(result, bk->bkFFT, MU, vLength * bitSize * nOutputs, temp_result, cudaBkFFT, cudaBkFFTCoalesce, // ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); gridSize = (int) ceil((float) (ex_length) / BLOCKSIZE); ANDvec_vector<<<gridSize, BLOCKSIZE>>>(ex_temp_result->a, u->a, u->a + ex_length, vLength, bitSize, extracted_n, ex_length); for (int i = 0; i < vLength * bitSize; ++i) { ex_temp_result->b[i] = u->b[i] + u->b[i + vLength * bitSize] + MuxConst; ex_temp_result->current_variance[i] = u->current_variance[i] + u->current_variance[i + vLength * bitSize]; } // lweKeySwitch_16(result, bk->ks, u, bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); lweKeySwitch_16(result, bk->bkFFT->ks, ex_temp_result, vLength*nOutputs*bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // lweKeySwitch_16_2_vector(result, bk->bkFFT->ks, ex_temp_result, vLength, 1, bitSize, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, // ks_cv_gpu, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // length = 500 * bitSize; // int *tempx = new int[length]; // cudaMemcpy(tempx, result->a, length * sizeof(Torus32), cudaMemcpyDeviceToHost); // for (int bI = 0; bI < bitSize; ++bI) { // int sI = bI * 500; // for (int i = 0; i < 10; ++i) { // cout << tempx[sI + i] << " "; // } // cout << endl; // cout << result->b[bI] << endl; // } // cout << endl; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // if (vLength % 2 == 1 && vLength < 2) { // cout << "Odd number in bootsAND_16_vector" << endl; // } // nOutputs = 2; // vLength = vLength/2; // tfhe_bootstrap_FFT_16_2_vector(result, bk->bkFFT, MU, vLength, nOutputs, bitSize, temp_result, cudaBkFFT, // cudaBkFFTCoalesce, ks_a_gpu, ks_a_gpu_extended, ks_b_gpu, ks_cv_gpu, // ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); // // // // cudaFree(temp_result->a); // temp_result->a = NULL; // freeLweSample_16(temp_result); } __device__ int modSwitchFromTorus32_GPU_device(Torus32 phase, int Msize){ uint64_t interv = ((UINT64_C(1)<<63)/Msize)*2; // width of each intervall uint64_t half_interval = interv/2; // begin of the first intervall uint64_t phase64 = (uint64_t(phase)<<32) + half_interval; //floor to the nearest multiples of interv return phase64/interv; } __global__ void bootstrappingUptoBlindRotate_OneBit(int *accum_a_b, int *temp_accum_a_b, int *bara_g, Torus32 MU, int *temp_res_a, int temp_res_b, double temp_res_cv, cufftDoubleComplex *cudaBkFFTCoalesceExt) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < 1024) { //x is temp_res register int n = 500, N = 1024, _2N = 2048, Ns2 = 512, Nx2 = 2048; //tfhe_bootstrap_FFT_16--> u // __shared__ int u_a[1024], u_b;//N // __shared__ double u_cv; //tfhe_bootstrap_woKS_FFT_16 // __shared__ int 
bara[1024];//N//torusPolyTestvect_coef[1024], register int barb; bara_g[id] = 0; // torusPolyTestvect_coef[id] = MU; if (id < n) {//500 bara_g[id] = modSwitchFromTorus32_GPU_device(temp_res_a[id], Nx2); } __syncthreads(); barb = modSwitchFromTorus32_GPU_device(temp_res_b, Nx2); //tfhe_blindRotateAndExtract_FFT_16 -> here v = torusPolyTestvect_coef __shared__ int testvectbis[1024];//N //torusPolynomialMulByXai_16 -> res ->testvectbis, v-> torusPolyTestvect_coef register int a = _2N - barb; if (a < N) {//1024 if (id < a) { testvectbis[id] = -MU;//torusPolyTestvect_coef[id - a + N]; } else { testvectbis[id] = MU;//torusPolyTestvect_coef[id - a]; } } else { register int aa = a - N; if (id < aa) { testvectbis[id] = MU;//torusPolyTestvect_coef[id - aa + N]; } else { testvectbis[id] = -MU;//torusPolyTestvect_coef[id - aa]; } } __syncthreads(); accum_a_b[id] = 0;//accum_a accum_a_b[1024 + id] = testvectbis[id]; temp_accum_a_b[id] = 0;//accum_a temp_accum_a_b[1024 + id] = 0; // bara_g[id] = bara[id]; } } __global__ void prepareForiFFT_1_Bit(int *des, int *decaCoalesce, cufftDoubleReal *d_rev_in, int *bara, int baraIndex, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int N = 1024, _2N = 2048, Ns2 = 512; register int tIndex = id % N; register int a = bara[baraIndex]; register int aa = a - N; register bool l1 = a < N, l2 = tIndex < a, l3 = tIndex < aa; int des_id = l1 * (l2 * (-source[id - a + N] - source[id]) + (!l2) * (source[id - a] - source[id])) + (!l1) * (l3 * (source[id - aa + N] - source[id]) + (!l3) * (-source[id - aa] - source[id])); register uint32_t halfBg = 512, maskMod = 1023, Bgbit = 10; // register uint32_t offset = 2149580800; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t temp1 = (((uint32_t)(des_id + 2149580800)) >> decal) & maskMod;//offset register int xxxxx1 = (temp1 - halfBg); // decaCoalesce[((id / (N)) * (N)) + id] = // (middleBlock) * xxxxx1 + (!middleBlock) * (decaCoalesce[((id / (N)) * (N)) + id]); p = 1; decal = (32 - (p + 1) * Bgbit); temp1 = (((uint32_t)(des_id + 2149580800)) >> decal) & maskMod;//offset register int xxxxx2 = temp1 - halfBg; // decaCoalesce[((id / (N)) * (N)) + id + (N)] = middleBlock * xxxxx2 + (!middleBlock) * decaCoalesce[((id / (N)) * (N)) + id + (N)]; register int bIndex = id / N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= 1) * N * 2; d_rev_in[destTod_rev_in] = xxxxx1/2.; d_rev_in[destTod_rev_in + 1024] = -xxxxx1/2.; destTod_rev_in += 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2/2.; d_rev_in[destTod_rev_in + 1024] = -xxxxx2/2.; } __global__ void prepareForFFT_1_Bit(cufftDoubleComplex *cuDecaFFTCoalesce, cufftDoubleComplex *tmpa_gpuCoal, cufftDoubleComplex *d_in, cufftDoubleComplex *d_rev_out, cufftDoubleComplex *bki, int keyIndex, int N, int Ns2, int length) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int k = 1, kpl = 4, keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; // if(id < 512) { int tempId = id; int bitIndex = tempId / Ns2; register cufftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register cufftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register cufftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2); bitIndex = (tempId) / Ns2; register 
cufftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex temp_a0 = cuCmul(v0, bki[aID]); cufftDoubleComplex temp_b0 = cuCmul(v0, bki[bID]); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex temp_a1 = cuCmul(v1, bki[aID]); cufftDoubleComplex temp_b1 = cuCmul(v1, bki[bID]); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex temp_a2 = cuCmul(v2, bki[aID]); cufftDoubleComplex temp_b2 = cuCmul(v2, bki[bID]); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex temp_a3 = cuCmul(v3, bki[aID]); cufftDoubleComplex temp_b3 = cuCmul(v3, bki[bID]); cufftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x + temp_a2.x + temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y + temp_a2.y + temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; cufftDoubleComplex tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x + temp_b2.x + temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y + temp_b2.y + temp_b3.y; // tmpa_gpuCoal[id + Ns2] = tmpa_gpuCoal1; register int largeSI = (id / Ns2) * (N + 1); register int tid = id % Ns2; d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal0; largeSI = (id / Ns2 + 1) * (N + 1); d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal1; //init with 0 // tmpa_gpuCoal[id].x = 0; // tmpa_gpuCoal[id].y = 0; // tmpa_gpuCoal[Ns2 + id].x = 0; // tmpa_gpuCoal[Ns2 + id].y = 0; //#pragma unroll // for (int i = 0; i < kpl; ++i) {//kpl // offset = i * Ns2; // aID = keySI + offset + id; // bID = keySI + offset + id + Ns2 * kpl; // // cufftDoubleComplex temp_a = cuCmul(cuDecaFFTCoalesce[offset + id], bki[aID]); // tmpa_gpuCoal[id].x += temp_a.x; // tmpa_gpuCoal[id].y += temp_a.y; // // cufftDoubleComplex temp_b = cuCmul(cuDecaFFTCoalesce[offset + id], bki[bID]); // tmpa_gpuCoal[Ns2 + id].x += temp_b.x; // tmpa_gpuCoal[Ns2 + id].y += temp_b.y; // // } // } // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // if (id < 1024) { // register int largeSI = (id / Ns2) * (N + 1); // register int tid = id % Ns2; // d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal[id]; //// d_in[largeSI + 2 * tid + 1].y = 1;//tmpa_gpuCoal[id]; // } } //__global__ void finishUpFFT_n_Bit(int *temp2, cufftDoubleReal *d_out, int *temp3) { // int id = blockIdx.x*blockDim.x+threadIdx.x; // register int N = 1024, _2N = 2048; // register double _2p32 = double(INT64_C(1) << 32); // register double _1sN = double(1) / double(N); // register int bitIndex = id / N; // register int tIndex = id % N; // register int startIndexLarge = bitIndex * _2N; // temp2[id] = Torus32(int64_t(d_out[startIndexLarge + tIndex] * _1sN * _2p32)) + temp3[id];// // //} __global__ void extractionAndKeySwitch_1_Bit(int *result_a, int *result_b, uint32_t *coal_d_aibar, uint32_t *coal_d_aij, int *accum_a_b, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { int id = blockIdx.x*blockDim.x+threadIdx.x; register int N = 1024, _2N = 2048, basebit = 2, base = 1 << basebit, mask = base - 1, t =8; register int32_t prec_offset = 1 << (32 - (1 + basebit * t)); register int index = 0; register int bitIndex = id / N; register int tIndex = id % N;//corresponding to j register int startIndex = bitIndex * N; 
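// Illustrative sketch (disabled, host-side): how each extracted coefficient is split
// into t = 8 digits of basebit = 2 bits for the key switch that follows. The helper
// name exampleDecomposeCoefficient is hypothetical; the device code below performs the
// same shift-and-mask per (i, j) pair when filling coal_d_aij.
#if 0
static void exampleDecomposeCoefficient(uint32_t aibar, uint32_t *digits /* size t */) {
    const int basebit = 2, t = 8;
    const uint32_t mask = (1u << basebit) - 1;               // base - 1
    for (int j = 0; j < t; ++j) {
        // j-th most significant group of basebit bits of aibar
        digits[j] = (aibar >> (32 - (j + 1) * basebit)) & mask;
    }
}
#endif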
__shared__ uint32_t s_coal_d_aibar[1024]; // __shared__ uint32_t coal_d_aij[1024 * 8]; bool multipleOfN = id % N == 0; s_coal_d_aibar[id] = (multipleOfN) * (accum_a_b[index - tIndex + startIndex] + prec_offset) + (!multipleOfN) * (-accum_a_b[index - tIndex + startIndex + N] + prec_offset); // if (id % N == 0) { // coal_d_aibar[id] = accum_a_b[index - tIndex + startIndex] + prec_offset; // } else { // coal_d_aibar[id] = -accum_a_b[index - tIndex + startIndex + N] + prec_offset; // } __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // __syncthreads(); // if(id < 1024) {//t register int tempID = id; register int i = tempID / t; register int j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; tempID += 1024; i = tempID / t; j = tempID % t; coal_d_aij[tempID] = (s_coal_d_aibar[i] >> (32 - (j + 1) * basebit)) & mask; // } // __syncthreads(); int subFromB = 0; int bi; if (id < 500) { result_a[id] = 0; register int A = 1024, B = t, C = base, D = 500, ks_t = 8; #pragma unroll 0 for (int i = 0; i < 1024; ++i) { int sI = i * ks_t; #pragma unroll 0 for (int j = 0; j < 8; ++j) {//ks_t int sI2 = sI + j; int aij = coal_d_aij[sI2]; if (aij != 0) { result_a[id] -= ks_a_gpu_extendedPtr[i * B * C * D + j * C * D + aij * D + (id % D)];//sourceA[(i * B * C * D + j * C * D+ aij * params_n + id)];//source[aij][id]; } // if(id < 1) { bi = coal_d_aij[sI2 + id]; subFromB += ks_b_gpu_extendedPtr[i * B * C + j * C + bi]; // } } } } if (id < 1) { result_b[0] = accum_a_b[N] - subFromB; } } /* void bootstrapping_gull_gpu_1_bit_wise(LweSample_16 *result, int *temp_res_a, int *temp_res_b, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { //bootstrapping woks uptoFFT int nThreads = 1024, BLOCKSIZE = 1024, k = 1, N = 1024, kpl = 4, Ns2 = 512, _2N = 2048; static const Torus32 MU = modSwitchToTorus32(1, 8); int gridSize = (int) ceil((float) (nThreads) / BLOCKSIZE);//1 int *accum_a_b, *bara, *temp_accum_a_b;//accum a and accum b together; bara; tempaccum for mux rotate cudaMalloc(&accum_a_b, nBits * 1024 * (k + 1) * sizeof(int)); cudaMalloc(&temp_accum_a_b, nBits * 1024 * (k + 1) * sizeof(int)); cudaMalloc(&bara, nBits * 1024 * sizeof(int)); cudaDeviceProp cProfile; cudaGetDeviceProperties(&cProfile, 0); int nSM = cProfile.multiProcessorCount; cout << "#SM: " << nSM << endl; //20 cudaStream_t streams[nSM]; #pragma unroll for (int i = 0; i < 20; ++i) {//nSM cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking); } for (int bIndex = 0; bIndex < nBits; ++bIndex) { int accumStart = bIndex * (1024 * (k + 1)); int baraStart = bIndex * 1024; int temp_res_aStart = 
bIndex * 500; bootstrappingUptoBlindRotate_OneBit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (accum_a_b + accumStart, temp_accum_a_b + accumStart, bara + baraStart, MU, temp_res_a + temp_res_aStart, temp_res_b[bIndex], NULL, cudaBkFFTCoalesceExt); } cudaDeviceSynchronize(); //after blind rotate int *decaCoalesce; cudaMalloc(&decaCoalesce, nBits * N * kpl * sizeof(int));//1024*4 cufftDoubleComplex *cuDecaFFTCoalesce; cudaMalloc(&cuDecaFFTCoalesce, nBits * kpl * Ns2 * sizeof(cufftDoubleComplex));//512*4 cufftDoubleComplex *tmpa_gpuCoal; cudaMalloc(&tmpa_gpuCoal, nBits * Ns2 * sizeof(cufftDoubleComplex) * (k + 1)); //fft variables cufftDoubleReal* d_rev_in; cufftDoubleComplex *d_rev_out; cufftDoubleComplex *d_in; cufftDoubleReal *d_out; int batch = kpl; int dParts = 4; //fft plans cufftHandle p; cufftHandle rev_p; //fft variables allocation cudaMalloc(&d_rev_in, nBits * sizeof(cufftDoubleReal) * _2N * batch); cudaMalloc(&d_rev_out, nBits * sizeof(cufftDoubleComplex) * (N + 1) * batch); cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, nBits * batch);//(nBits * batch)/dParts);// (batch - (batch/dParts))); batch = 2;//batch/dParts;//a and b together cudaMalloc(&d_in, nBits * sizeof(cufftDoubleComplex) * (N + 1) * batch);//batch cudaMalloc(&d_out, nBits * sizeof(cufftDoubleReal) * _2N * batch); cufftPlan1d(&p, _2N, CUFFT_Z2D, nBits * batch); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; // assert(nBits == 1); //call tfhe_MuxRotate_FFT_16 #pragma unroll for (int j = 0; j < 500; ++j) {//500 gridSize = 2;//2;//as accum is of (k + 1) for (int bIndex = 0; bIndex < nBits; ++bIndex) { //find starting indices int accumStart = bIndex * 1024 * (k + 1); int decaCoalesceStart = bIndex * 1024 * kpl; int d_rev_inStart = bIndex * _2N * kpl; int baraStart = bIndex * N; prepareForiFFT_1_Bit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (temp2 + accumStart, decaCoalesce + decaCoalesceStart, d_rev_in + d_rev_inStart, bara + baraStart, j, temp3 + accumStart); } cudaDeviceSynchronize(); cufftExecD2Z(rev_p, d_rev_in, d_rev_out); cudaDeviceSynchronize(); int length = kpl * Ns2;//4 * 512 = 2048 gridSize = 1;//(int) ceil((float) (length) / BLOCKSIZE); //2 for (int bIndex = 0; bIndex < nBits; ++bIndex) { int cuDecaFFTCoalesceStart = bIndex * kpl * Ns2; int tmpa_gpuCoalStart = bIndex * (k + 1) * Ns2; int d_inStart = bIndex * (N + 1) * (k + 1); int d_rev_outStart = bIndex *(N + 1) * kpl; prepareForFFT_1_Bit<<<gridSize, 512, 0, streams[bIndex % nSM]>>> (cuDecaFFTCoalesce + cuDecaFFTCoalesceStart, tmpa_gpuCoal + tmpa_gpuCoalStart, d_in + d_inStart, d_rev_out + d_rev_outStart, cudaBkFFTCoalesceExt, j, N, Ns2, length); } cudaDeviceSynchronize(); cufftExecZ2D(p, d_in, d_out); cudaDeviceSynchronize(); //after fft length = N * 2; gridSize = (int) ceil((float) (length) / BLOCKSIZE); //2 for (int bIndex = 0; bIndex < nBits; ++bIndex) { int accumStart = bIndex * 1024 * (k + 1); int d_outStart = bIndex * _2N * (k + 1); finishUpFFT_n_Bit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (temp2 + accumStart, d_out + d_outStart, temp3 + accumStart); } cudaDeviceSynchronize(); swap(temp2, temp3); } //output is in temp3 //extract and ks //intermediate variables to test u (delete afterwards) int *result_b; double *result_cv = NULL; cudaMalloc(&result_b, nBits * sizeof(int)); // cudaMalloc(&result_cv, 1 * sizeof(double)); uint32_t *coal_d_aibar; cudaMalloc(&coal_d_aibar, nBits * N * sizeof(uint32_t)); int coal_d_aijSize = nBits * N * 8;//t uint32_t *coal_d_aij; cudaMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t)); // int length 
= N * 8;//t gridSize = 1;//(int) ceil((float) (length) / BLOCKSIZE); for (int bIndex = 0; bIndex < nBits; ++bIndex) { int result_aStart = bIndex * 500; int result_bStart = bIndex; int coal_d_aibarStart = bIndex * N; int coal_d_aijStart = bIndex * N * 8; int accumStart = bIndex * (k + 1) * 1024; extractionAndKeySwitch_1_Bit<<<gridSize, BLOCKSIZE, 0, streams[bIndex % nSM]>>> (result->a + result_aStart, result_b + result_bStart, coal_d_aibar + coal_d_aibarStart, coal_d_aij + coal_d_aijStart, temp3 + accumStart, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); } cudaDeviceSynchronize(); cudaMemcpy(result->b, result_b, nBits * sizeof(int), cudaMemcpyDeviceToHost); // int *temp = new int[500]; // cudaMemcpy(temp, result->a, 500 * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < 500; ++i) { // cout << temp[i] << " "; // } // cout << endl; // cout << result->b[0] << endl; // assert(nBits == 1); #pragma unroll for (int i = 0; i < 20; ++i) { //nSM cudaStreamDestroy(streams[i]); } cudaFree(temp_res_a); cudaFree(accum_a_b); cudaFree(temp_accum_a_b); cudaFree(bara); cudaFree(decaCoalesce);//1024*4 cudaFree(cuDecaFFTCoalesce);//512*4 cudaFree(tmpa_gpuCoal); cudaFree(d_rev_in); cudaFree(d_rev_out); cudaFree(d_in);//batch cudaFree(d_out); cudaFree(result_b); cudaFree(coal_d_aibar); cudaFree(coal_d_aij); cufftDestroy(rev_p); cufftDestroy(p); } EXPORT void bootsAND_fullGPU_OneBit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr, double *ks_cv_gpu_extendedPtr) { const int n = 500, BLOCKSIZE = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); int *temp_res_a, *temp_res_b; cudaMalloc(&temp_res_a, n * nBits * sizeof(Torus32)); temp_res_b = new int[nBits]; int gridSize = (int) ceil((float) (n * nBits) / BLOCKSIZE); vecAdd<<<gridSize, BLOCKSIZE>>>(temp_res_a, ca->a, cb->a, n * nBits); for (int i = 0; i < nBits; ++i) { temp_res_b[i] = ca->b[i] + cb->b[i] + AndConst; } bootstrapping_gull_gpu_1_bit_wise(result, temp_res_a, temp_res_b, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr, ks_cv_gpu_extendedPtr); cudaFree(temp_res_a); delete [] temp_res_b; } */ __constant__ int n = 500, N = 1024, _2N = 2048, Ns2 = 512, Nx2 = 2048, k = 1; __constant__ uint32_t halfBg = 512, maskMod = 1023, Bgbit = 10, kpl = 4, l = 2; __constant__ uint32_t offset = 2149580800; __constant__ double _1sN = double(1) / double(1024); __constant__ double _2p32 = double(INT64_C(1) << 32); __global__ void bootstrappingUptoBlindRotate_n_Bit(int *accum_a_b, int *bara, Torus32 MU, int nBits, int *temp_res_a, int *barb) { register int id = blockIdx.x * blockDim.x + threadIdx.x; int bIndex = id / N; int baraIndex = id % N; int a = _2N - barb[bIndex]; int aa = a - N; register bool L1 = a < N, L2 = baraIndex < a, L3 = baraIndex < aa; register int acc_a_b_id = L1 * (L2 * (-MU) + (!L2) * (MU)) + (!L1) * (L3 * (MU) + (!L3) * (-MU)); accum_a_b[id] = acc_a_b_id; if(id < n * nBits) { bIndex = id / n; register int temp_res_a_id = temp_res_a[id]; register int bara_id = modSwitchFromTorus32_GPU_device(temp_res_a_id, Nx2); bara[bIndex * N + id % n] = bara_id; } } __global__ void prepareForiFFT_n_Bit(int *des, int *decaCoalesce, cufftDoubleReal *d_rev_in, int nBits, int *bara, int 
baraIndex, int *source, int length) { register int id = blockIdx.x * blockDim.x + threadIdx.x; // if (id < length) { // bool outerBlock = id < nBits * 2 * 1024; // if (id < nBits * 2 * 1024) {//nBits * (k + 1) * 1024 register int bitIndex = (id / N) % nBits; register int threadIdModN = id % N; register int a = bara[bitIndex * N + baraIndex]; register int aa = a - N; register bool L1 = a < N, L2 = threadIdModN < a, L3 = threadIdModN < aa; // des[id] = (!outerBlock) * des[id] // + outerBlock * (l1 * (l2 * (-source[id - a + N] - source[id]) // + (!l2) * (source[id - a] - source[id])) // + (!l1) * (l3 * (source[id - aa + N] - source[id]) // + (!l3) * (-source[id - aa] - source[id]))); register int des_id = 0; register int s1_id = L1 * ((L2) * (id - a + N) + (!L2) * (id - a)) + (!L1) * ((L3) * (id - aa + N) + (!L3) * (id - aa)); // int des_id = (L1 * (L2 * (-source[id - a + N] - source[id]) // + (!L2) * (source[id - a] - source[id])) // + (!L1) * (L3 * (source[id - aa + N] - source[id]) // + (!L3) * (-source[id - aa] - source[id]))); des_id = (L1 * (L2 * (-source[s1_id] - source[id]) + (!L2) * (source[s1_id] - source[id])) + (!L1) * (L3 * (source[s1_id] - source[id]) + (!L3) * (-source[s1_id] - source[id]))); // if (a < N) { // if (threadIdModN < a) { //// des[id] = -source[id - a + N] - source[id]; // des_id = -source[id - a + N] - source[id]; // } else { //// des[id] = source[id - a] - source[id]; // des_id = source[id - a] - source[id]; // } // } else { // if (threadIdModN < aa) { //// des[id] = source[id - aa + N] - source[id]; // des_id = source[id - aa + N] - source[id]; // } else { //// des[id] = -source[id - aa] - source[id]; // des_id = -source[id - aa] - source[id]; // } // } // bool middleBlock = id < nBits * 2 * 1024;//4//kpl // decaCoalesce[id] = middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t val = ((uint32_t)(des_id + offset)); register uint32_t temp1 = (val >> decal) & maskMod; register int xxxxx1 = (temp1 - halfBg);// + (!middleBlock) * (decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id]); // decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id] = xxxxx1; // middleBlock * (temp1 - halfBg) + // (!middleBlock) * (decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id]); p = 1; decal = (32 - (p + 1) * Bgbit); val = ((uint32_t)(des_id + offset)); temp1 = (val >> decal) & maskMod; register int xxxxx2 = temp1 - halfBg; // decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * nBits)] = xxxxx2; // middleBlock * (temp1 - halfBg) + // (!middleBlock) * decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * // nBits)];//(temp1 - halfBg) + (!middleBlock) * decaCoalesce[id];//middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; //(!middleBlock) * decaCoalesce[((id / (N * nBits)) * (N * nBits)) + id + (N * nBits)]; // decaCoalesce[(nBits * N) + id] = middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; // decaCoalesce[(nBits * N) + id] = middleBlock * id;//middleBlock * id; //1;//middleBlock * (id) + (!middleBlock) * (decaCoalesce[id]);// middleBlock * (temp1 - halfBg) + (!middleBlock) * decaCoalesce[id]; // } // register int startIndexSmall = bIndex * N; // middleBlock = tIndex < N; // 
d_rev_in[id] = middleBlock * (decaCoalesce[startIndexSmall + tIndex] / 2.) // + (!middleBlock) * (d_rev_in[id] = -decaCoalesce[startIndexSmall + tIndex - N] / 2.); // d_rev_in[((id / (N * nBits)) * (N * nBits)) + id + (tIndex >= N) * 1024 * bIndex] = id;//middleBlock * (1) + (!middleBlock) * (d_rev_in[id]); int bIndex = (id / N); int tIndex = id % N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= nBits) * nBits * N * 2; d_rev_in[destTod_rev_in] = xxxxx1 / 2.;//id;// d_rev_in[destTod_rev_in + 1024] = -xxxxx1 / 2.;//id;// destTod_rev_in += nBits * 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2 / 2.;//id; d_rev_in[destTod_rev_in + 1024] = -xxxxx2 / 2.;//id; // } } __global__ void prepareForFFT_n_Bit(cufftDoubleComplex *cuDecaFFTCoalesce, cufftDoubleComplex *tmpa_gpuCoal, cufftDoubleComplex *d_in, cufftDoubleComplex *d_rev_out, cufftDoubleComplex *bki, int keyIndex, int nBits) { int id = blockIdx.x * blockDim.x + threadIdx.x; // if (id < nBits * 4 * 512) {//nBits * kpl * Ns2 int tempId = id; int bitIndex = tempId/Ns2; cufftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; cufftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; cufftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; cufftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; int keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex bki_aid = bki[aID]; cufftDoubleComplex bki_bid = bki[bID]; cufftDoubleComplex temp_a0 = cuCmul(v0, bki_aid); cufftDoubleComplex temp_b0 = cuCmul(v0, bki_bid); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a1 = cuCmul(v1, bki_aid); cufftDoubleComplex temp_b1 = cuCmul(v1, bki_bid); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a2 = cuCmul(v2, bki_aid); cufftDoubleComplex temp_b2 = cuCmul(v2, bki_bid); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a3 = cuCmul(v3, bki_aid); cufftDoubleComplex temp_b3 = cuCmul(v3, bki_bid); cufftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x +temp_a2.x +temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y +temp_a2.y +temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; cufftDoubleComplex tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x +temp_b2.x +temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y +temp_b2.y +temp_b3.y; // tmpa_gpuCoal[nBits * Ns2 + id] = tmpa_gpuCoal1; // cufftDoubleComplex temp_a = cuCmul(cuDecaFFTCoalesce[i * (Ns2 * nBits) + id], bki[aID]); // cufftDoubleComplex temp_b = cuCmul(cuDecaFFTCoalesce[i * (Ns2 * nBits) + id], bki[bID]); int largeSI = (id / Ns2) * (N + 1); int tid = id % Ns2; d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal0; largeSI = (id / Ns2 + nBits) * (N + 1); d_in[largeSI + 2 * tid + 1] = tmpa_gpuCoal1; } __global__ void finishUpFFT_n_Bit(int *temp2, cufftDoubleReal 
*d_out, int *temp3, int nBits) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int bitIndex = id / N; register int tIndex = id % N; register int startIndexLarge = bitIndex * _2N; int temp3_id = temp3[id]; register cufftDoubleReal d_out_id = d_out[startIndexLarge + tIndex]; temp2[id] = Torus32(int64_t(d_out_id * _1sN * _2p32)) + temp3_id; } __global__ void extract_gpu_n_Bit(int *destination, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int bitIndex = id / N; register int tIndex = id % N;//corresponding to j register int startIndex = bitIndex * N; register bool L1 = id % N == 0; register int s_id = L1 * (-tIndex + startIndex) + (!L1) * (-tIndex + startIndex + N); register int des_id = source[s_id]; des_id = L1 * des_id + (!L1) * (-des_id);// + 32768; destination[id] = des_id; } __global__ void getAibarCoalesce_n_Bit(uint32_t *d_aibar, const Torus32 *ai, int32_t prec_offset, int bitSize, int n) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int i = id/bitSize; register int tID = id % bitSize; register int startIndex = tID * n; Torus32 ai_i = ai[startIndex + i]; d_aibar[id] = ai_i + prec_offset; } __global__ void calculateAijFromAibarCoalesce_n_Bit(uint32_t *aij, uint32_t *aibar, int bitSize, int t, int basebit, int mask) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int i = id /(bitSize * t); register int j = (id / bitSize) % t; register int tId = id % bitSize; register uint32_t aibar_id = aibar[i * bitSize + tId]; aij[id] = (aibar_id >> (32 - (j + 1) * basebit)) & mask; } __global__ void lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit(int *destinationA, Torus32 *sourceA, uint32_t *d_aij, int *destinationB, int *sourceB, int ks_n, int ks_t, int ks_base, int bitSize, int n, int params_n) { int id = blockIdx.x * blockDim.x + threadIdx.x; int A = ks_n, B = ks_t, C = ks_base, D = params_n; register int sourceA_id; register int desA_id = destinationA[id]; register int desB_id = 0; desB_id = destinationB[id % bitSize];// the modu is to avoid if and aray index out of bound register int sourceB_id; int bitIndex = id / params_n; int tId = id % (bitSize * params_n); #pragma unroll for (register int i = 0; i < 1024; ++i) { int sI = i * (ks_t * bitSize); for (register int j = 0; j < ks_t; ++j) { int sI2 = sI + j * bitSize; int aij = d_aij[sI2 + bitIndex]; // if (aij != 0) { sourceA_id = sourceA[i * B * C * D + j * C * D + aij * D + (id % D)]; desA_id -= sourceA_id; // } // bool id_lt_bitSize = id < bitSize; // int bi = id_lt_bitSize * d_aij[sI2 + id] + (!id_lt_bitSize) * 0; // if(id < bitSize) { int bi = d_aij[sI2 + (id % bitSize)];//this mod is to avoid the out of bound and to avoid if else desB_id -= sourceB[i * B * C + j * C + bi]; // sourceB_id = sourceB[i * B * C + j * C + bi]; // desB_id -= (id_lt_bitSize * (sourceB_id) + (!id_lt_bitSize) * 0); // } } } destinationA[id] = desA_id; if (id < bitSize) { destinationB[id] = desB_id; } } void keySwitch_n_Bit(LweSample_16* result, int *u_a_GPU, int *u_b_GPU, int nBits, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { //key switch const static int n = 500, ks_n = 1024, ks_basebit = 2, ks_t = 8, ks_out_params_n = 500, nTHREADS = 1024; const static int base = 1 << ks_basebit;// base=2 in [CGGI16] const static int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const static int mask = base - 1; // cout << "nBits: " << nBits << endl; int coal_d_aibarSize = nBits * ks_n;//16*1024 uint32_t *coal_d_aibar; 
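// Hedged reference sketch (disabled): what the three kernel launches below compute for a
// single output bit, written sequentially on the host. The helper name
// exampleKeySwitchOneBitCPU and its flat ks_a/ks_b layout arguments are assumptions for
// illustration only; the indexing mirrors lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit.
#if 0
static void exampleKeySwitchOneBitCPU(int *res_a /* 500, pre-zeroed */, int *res_b,
                                      const Torus32 *u_a /* 1024 extracted coeffs */, int u_b,
                                      const Torus32 *ks_a, const Torus32 *ks_b) {
    const int N = 1024, n = 500, t = 8, basebit = 2, base = 1 << basebit, mask = base - 1;
    const int32_t prec_offset = 1 << (32 - (1 + basebit * t));   // rounding offset
    *res_b = u_b;
    for (int i = 0; i < N; ++i) {
        uint32_t aibar = (uint32_t) u_a[i] + prec_offset;
        for (int j = 0; j < t; ++j) {
            uint32_t aij = (aibar >> (32 - (j + 1) * basebit)) & mask;
            if (aij == 0) continue;   // the device kernel subtracts unconditionally
            const Torus32 *row = &ks_a[((i * t + j) * base + aij) * n];
            for (int p = 0; p < n; ++p) res_a[p] -= row[p];      // a part of the KS sample
            *res_b -= ks_b[(i * t + j) * base + aij];            // b part of the KS sample
        }
    }
}
#endif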
CudaSafeCall(cudaMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t))); cudaCheckErrors("ks: 0"); getAibarCoalesce_n_Bit<<<nBits, nTHREADS>>> (coal_d_aibar, u_a_GPU, prec_offset, nBits, ks_n); int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; CudaSafeCall(cudaMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t))); cudaCheckErrors("ks: 1"); calculateAijFromAibarCoalesce_n_Bit<<<8 * nBits, nTHREADS>>> (coal_d_aij, coal_d_aibar, nBits, ks_t, ks_basebit, mask); cudaCheckErrors("ks: 2"); int nBLOCKS = (int) ceil((float) (nBits * n) / nTHREADS);//500 lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit<<<nBits, n>>> (result->a, ks_a_gpu_extendedPtr, coal_d_aij, u_b_GPU, ks_b_gpu_extendedPtr, ks_n,//1024 ks_t,//8 base, nBits, ks_n,//1024 n);//500/**/ cudaCheckErrors("ks: 3"); // cudaDeviceSynchronize(); CudaSafeCall(cudaMemcpy(result->b, u_b_GPU, nBits * sizeof(int), D2H)); cudaCheckErrors("ks: 4"); cudaFree(coal_d_aibar); cudaFree(coal_d_aij); } void bootstrapAndKeySwitch_n_Bit(LweSample_16* result, int *temp_res_a_gpu, int *temp_res_b_cpu, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } int *accum_a_b, *bara, *temp_accum_a_b, *barb;//, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate cudaMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int)); cudaMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int)); cudaMalloc(&bara, nBits * N * sizeof(int)); cudaMalloc(&barb, nBits * sizeof(int)); cudaMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); cudaMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); cudaMemset(bara, 0, nBits * N * sizeof(int)); cudaMemcpy(barb, temp_res_b_cpu, nBits * sizeof(int), H2D); cudaCheckErrors("Here0"); bootstrappingUptoBlindRotate_n_Bit<<<nBits, nTHREADS>>> (accum_a_b + nBits * N, bara, MU, nBits, temp_res_a_gpu, barb); cudaCheckErrors("Here1"); //cufft helper variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 cufftDoubleReal* d_rev_in; cufftDoubleComplex *d_rev_out; cufftDoubleComplex *d_in; cufftDoubleReal *d_out; //cufft plans cufftHandle p; cufftHandle rev_p; //ifft variables allocation CudaSafeCall(cudaMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(cufftDoubleReal))); CudaSafeCall(cudaMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, iFFTBatch);// - nBits);// - (iFFTBatch / dParts)); // CudaSafeCall(cudaMemset(d_rev_in, 0, iFFTBatch * _2N * sizeof(cufftDoubleReal))); //fft variables allocation CudaSafeCall(cudaMalloc(&d_in, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); CudaSafeCall(cudaMalloc(&d_out, FFTBatch * _2N * sizeof(cufftDoubleReal))); cufftPlan1d(&p, _2N, CUFFT_Z2D, FFTBatch); CudaSafeCall(cudaMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; cudaCheckErrors("Here2"); for (int j = 0; j < 500; ++j) {//500 cudaCheckErrors("HereInside1"); prepareForiFFT_n_Bit<<<nBits * 2, nTHREADS>>> (temp2, NULL,//decaCoalesce, d_rev_in, nBits, bara, j, temp3, nBits * 2 * nTHREADS); cudaCheckErrors("HereInside2"); cufftExecD2Z(rev_p, d_rev_in, d_rev_out); // cudaDeviceSynchronize(); 
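// Hedged note (disabled sketch): after the inverse transform later in this loop,
// finishUpFFT_n_Bit folds each real FFT sample back onto the torus by scaling with
// 1/N and 2^32 and adding the old accumulator value. exampleFoldFFTOutput is a
// hypothetical one-coefficient host equivalent, shown only to document that scaling.
#if 0
static Torus32 exampleFoldFFTOutput(double fftOut, Torus32 accum) {
    const double _2p32 = double(INT64_C(1) << 32);   // torus <-> double scale
    const double _1sN  = 1.0 / 1024;                 // 1/N with N = 1024
    return Torus32(int64_t(fftOut * _1sN * _2p32)) + accum;
}
#endif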
cudaCheckErrors("HereInside3"); prepareForFFT_n_Bit<<<nBits, 512>>> (NULL,//cuDecaFFTCoalesce, NULL,//tmpa_gpuCoal, d_in, d_rev_out, cudaBkFFTCoalesceExt, j, nBits); cudaCheckErrors("HereInside4"); cufftExecZ2D(p, d_in, d_out); // cudaDeviceSynchronize(); cudaCheckErrors("HereInside5"); finishUpFFT_n_Bit<<<nBits * 2, nTHREADS>>> (temp2, d_out, temp3, nBits); cudaCheckErrors("HereInside6"); swap(temp2, temp3); // int *x = temp2; // temp2 = temp3; // temp3 = x; cudaCheckErrors("HereInside7"); } //extract int *u_a_GPU, *u_b_CPU, *temp_u_b; cudaMalloc(&u_a_GPU, nBits * N * sizeof(int)); u_b_CPU = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; cudaMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H); extract_gpu_n_Bit<<<nBits, 1024>>>(u_a_GPU, accum_a_b); for (int i = 0; i < nBits; ++i) { u_b_CPU[i] = temp_u_b[i * N + nBits * N]; } int *u_b_GPU; CudaSafeCall(cudaMalloc(&u_b_GPU, nBits * sizeof(int))); cudaCheckErrors("Before Extracting"); CudaSafeCall(cudaMemset(result->a, 0, nBits * 500 * sizeof(int))); cudaMemcpy(u_b_GPU, u_b_CPU, nBits * sizeof(int), H2D); //key switch cudaCheckErrors("Before starting KS"); keySwitch_n_Bit(result, u_a_GPU, u_b_GPU, nBits, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr);/**/ cudaFree(accum_a_b); cudaFree(temp_accum_a_b); cudaFree(bara); cudaFree(barb); //cufft helper variables cudaFree(d_rev_in); cudaFree(d_rev_out); cudaFree(d_in); cudaFree(d_out); cufftDestroy(rev_p); cufftDestroy(p); //KS vars cudaFree(u_a_GPU); delete [] u_b_CPU; delete [] temp_u_b; cudaFree(u_b_GPU); } void bootstrapAndKeySwitch_n_Bit_MUX(LweSample_16* result, Torus32 *temp_res_a_gpu, int *temp_res_b_cpu, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } int *accum_a_b, *bara, *temp_accum_a_b, *barb;//, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate CudaSafeCall(cudaMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(cudaMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(cudaMalloc(&bara, nBits * N * sizeof(int))); CudaSafeCall(cudaMalloc(&barb, nBits * sizeof(int))); CudaSafeCall(cudaMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(cudaMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int))); CudaSafeCall(cudaMemset(bara, 0, nBits * N * sizeof(int))); CudaSafeCall(cudaMemcpy(barb, temp_res_b_cpu, nBits * sizeof(int), H2D)); cudaCheckErrors("Here0"); bootstrappingUptoBlindRotate_n_Bit<<<nBits, nTHREADS>>> (accum_a_b + nBits * N, bara, MU, nBits, temp_res_a_gpu, barb); cudaCheckErrors("Here1"); //cufft helper variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 cufftDoubleReal* d_rev_in; cufftDoubleComplex *d_rev_out; cufftDoubleComplex *d_in; cufftDoubleReal *d_out; //cufft plans cufftHandle p; cufftHandle rev_p; //ifft variables allocation CudaSafeCall(cudaMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(cufftDoubleReal))); CudaSafeCall(cudaMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, iFFTBatch);// - nBits);// - (iFFTBatch / dParts)); CudaSafeCall(cudaMemset(d_rev_in, 0, iFFTBatch * _2N * sizeof(cufftDoubleReal))); //fft variables 
allocation CudaSafeCall(cudaMalloc(&d_in, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); CudaSafeCall(cudaMalloc(&d_out, FFTBatch * _2N * sizeof(cufftDoubleReal))); cufftPlan1d(&p, _2N, CUFFT_Z2D, FFTBatch); CudaSafeCall(cudaMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex))); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; // cout << nBits << endl; cudaCheckErrors("Here2"); for (int j = 0; j < 500; ++j) {//500 cudaCheckErrors("HereInside1"); prepareForiFFT_n_Bit<<<nBits * 2, nTHREADS>>> (temp2, NULL,//decaCoalesce, d_rev_in, nBits, bara, j, temp3, nBits * 2 * nTHREADS); cudaCheckErrors("HereInside2"); cufftExecD2Z(rev_p, d_rev_in, d_rev_out); // cudaDeviceSynchronize(); cudaCheckErrors("HereInside3"); prepareForFFT_n_Bit<<<nBits, 512>>> (NULL,//cuDecaFFTCoalesce, NULL,//tmpa_gpuCoal, d_in, d_rev_out, cudaBkFFTCoalesceExt, j, nBits); cudaCheckErrors("HereInside4"); cufftExecZ2D(p, d_in, d_out); // cudaDeviceSynchronize(); cudaCheckErrors("HereInside5"); finishUpFFT_n_Bit<<<nBits * 2, nTHREADS>>> (temp2, d_out, temp3, nBits); cudaCheckErrors("HereInside6"); // swap(temp2, temp3); int* x = temp2; temp2 = temp3; temp3 = x; cudaCheckErrors("HereInside7"); } //extract int *u_a_GPU, *u_b_CPU, *temp_u_b; CudaSafeCall(cudaMalloc(&u_a_GPU, nBits * N * sizeof(int))); u_b_CPU = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; CudaSafeCall(cudaMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H)); extract_gpu_n_Bit<<<nBits, 1024>>>(u_a_GPU, accum_a_b); for (int i = 0; i < nBits; ++i) { u_b_CPU[i] = temp_u_b[i * N + nBits * N]; } nBits = nBits/2; static const Torus32 MuxConst = modSwitchToTorus32(1, 8); int *u_a_GPU_halfBits, *u_b_CPU_halfBits, *u_b_GPU_halfBits; CudaSafeCall(cudaMalloc(&u_a_GPU_halfBits, nBits * N * sizeof(int))); CudaSafeCall(cudaMalloc(&u_b_GPU_halfBits, nBits * sizeof(int))); u_b_CPU_halfBits = new int[nBits]; ANDvec_vector<<<nBits, nTHREADS>>> (u_a_GPU_halfBits, u_a_GPU, u_a_GPU + nBits * N, 1, nBits, N, nBits * N);//the three params are redundant and not used for (int i = 0; i < nBits; ++i) { u_b_CPU_halfBits[i] = u_b_CPU[i] + u_b_CPU[i + nBits] + MuxConst; } // cudaMemset(result->a, 0, nBits * 500 * sizeof(Torus32));//TAKEN TO THE CALLER CudaSafeCall(cudaMemcpy(u_b_GPU_halfBits, u_b_CPU_halfBits, nBits * sizeof(int), H2D)); //key switch const static int ks_n = 1024, ks_basebit = 2, ks_t = 8, ks_out_params_n = 500; const static int base = 1 << ks_basebit;// base=2 in [CGGI16] const static int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const static int mask = base - 1; // cout << "nBits: " << nBits << endl; int coal_d_aibarSize = nBits * ks_n;//16*1024 uint32_t *coal_d_aibar; CudaSafeCall(cudaMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t))); cudaCheckErrors("ks: 0"); getAibarCoalesce_n_Bit<<<nBits, nTHREADS>>> (coal_d_aibar, u_a_GPU_halfBits, prec_offset, nBits, ks_n); int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; CudaSafeCall(cudaMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t))); cudaCheckErrors("ks: 1"); calculateAijFromAibarCoalesce_n_Bit<<<8 * nBits, nTHREADS>>> (coal_d_aij, coal_d_aibar, nBits, ks_t, ks_basebit, mask); cudaCheckErrors("ks: 2"); lweKeySwitchVectorSubstraction_gpu_testing_coalesce_n_Bit<<<nBits, n>>> (result->a, ks_a_gpu_extendedPtr, coal_d_aij, u_b_GPU_halfBits, ks_b_gpu_extendedPtr, ks_n,//1024 ks_t,//8 base, nBits, ks_n,//1024 n);//500 cudaCheckErrors("ks: 3"); // cudaDeviceSynchronize(); CudaSafeCall(cudaMemcpy(result->b, u_b_GPU_halfBits, nBits * 
sizeof(int), D2H)); cudaCheckErrors("ks: 4"); cudaCheckErrors("BootsMUX: n"); cudaFree(accum_a_b); cudaFree(temp_accum_a_b); cudaFree(bara); cudaFree(barb); //cufft helper variables cudaFree(d_rev_in); cudaFree(d_rev_out); cudaFree(d_in); cudaFree(d_out); cufftDestroy(rev_p); cufftDestroy(p); //KS vars cudaFree(u_a_GPU); delete [] u_b_CPU; delete [] temp_u_b; cudaFree(u_a_GPU_halfBits); delete [] u_b_CPU_halfBits; cudaFree(u_b_GPU_halfBits); //ks cudaFree(coal_d_aibar); cudaFree(coal_d_aij); } EXPORT void bootsAND_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; register int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("AND: Here-2"); vecAdd<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, ca->a, cb->a, length); cudaCheckErrors("AND: Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; cudaFree(temp_res_a_gpu); } EXPORT void bootsXOR_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XOR: Here-2"); int mulVal = 2; vecAddMulTo<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, mulVal, ca->a, cb->a, length); cudaCheckErrors("XOR: Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = (ca->b[i] + cb->b[i]) * mulVal + XorConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; cudaFree(temp_res_a_gpu); } EXPORT void bootsXNOR_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/4) + 2*(-ca-cb) static const Torus32 XnorConst = modSwitchToTorus32(-1, 4); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XNOR: Here-2"); int mulVal = 2; vecAddMulTo<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, mulVal, ca->a, cb->a, length); cudaCheckErrors("XNOR: 
Here-1"); for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = (ca->b[i] + cb->b[i]) * mulVal + XnorConst; // temp_res_b_cpu[i] = modSwitchFromTorus32(temp_res_b_cpu[i], _2N); result->b[i] = -MU; } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); delete [] temp_res_b_cpu; cudaFree(temp_res_a_gpu); } EXPORT void bootsMUX_fullGPU_n_Bit(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const LweSample_16 *cc, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024; static const Torus32 MU = modSwitchToTorus32(1, 8); static const Torus32 AndConst = modSwitchToTorus32(-1, 8); static const Torus32 MuxConst = modSwitchToTorus32(1, 8); Torus32 *temp_res_a_gpu; int *temp_res_b_cpu; int nOutputs = 2; int nBootsBits = nBits * nOutputs; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nBootsBits * sizeof(Torus32))); temp_res_b_cpu = new int[nBootsBits]; int length = nBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("XMUX: Here-2"); ANDvec_vector<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, ca->a, cb->a, 1, nBits, n, length);// dummy variables (last4) SUBvec_vector<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu + length, ca->a, cc->a, 1, nBits, n, length);// dummy variables (last4) for (int i = 0; i < nBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; temp_res_b_cpu[i + nBits] = - ca->b[i] + cc->b[i] + AndConst; result->b[i] = -MU; } cudaMemset(result->a, 0, nBits * 500 * sizeof(Torus32)); bootstrapAndKeySwitch_n_Bit_MUX(result, temp_res_a_gpu, temp_res_b_cpu, nBootsBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); cudaFree(temp_res_a_gpu); delete [] temp_res_b_cpu; } EXPORT void bootsANDXOR_fullGPU_n_Bit_vector(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, const int vLength, const int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024, nOut = 2; int nTotalInputBits = vLength * nBits; int nTotalOutputBits = vLength * nBits * nOut; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; Torus32 *temp_res_a_gpu; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nTotalOutputBits * sizeof(Torus32))); int *temp_res_b_cpu = new int[nTotalOutputBits]; register int length = nTotalOutputBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("ANDXOR_vec: Here - 0"); ANDXORvecMulAllto_vector<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, ca->a, cb->a, vLength, nBits, n, length); cudaCheckErrors("AND: Here - 1"); for (int i = 0; i < nTotalInputBits; ++i) { temp_res_b_cpu[i] = ca->b[i] + cb->b[i] + AndConst; //for and temp_res_b_cpu[i + nTotalInputBits] = mulValXor * (ca->b[i] + cb->b[i]) + XorConst;// for xor } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nTotalOutputBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); cudaCheckErrors("AND: Here - 2"); delete [] temp_res_b_cpu; cudaFree(temp_res_a_gpu); } EXPORT void bootsXORXOR_fullGPU_n_Bit_vector(LweSample_16 *result, const LweSample_16 *ca1, const LweSample_16 *ca2, const LweSample_16 *cb1, const LweSample_16 
*cb2, const int vLength, const int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { static const int n = 500, nTHREADS = 1024, nOut = 2; int nTotalInputBits = vLength * nBits; int nTotalOutputBits = vLength * nBits * nOut; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,1/4) + 2*(ca + cb) static const Torus32 XorConst = modSwitchToTorus32(1, 4); static const int mulValXor = 2; Torus32 *temp_res_a_gpu; CudaSafeCall(cudaMalloc(&temp_res_a_gpu, n * nTotalOutputBits * sizeof(Torus32))); int *temp_res_b_cpu = new int[nTotalOutputBits]; register int length = nTotalInputBits * n; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); cudaCheckErrors("ANDXOR_vec: Here - 0"); XORXORvecMulAllto_vector<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu, ca1->a, ca2->a, n, nBits, length); XORXORvecMulAllto_vector<<<nBLOCKS, nTHREADS>>>(temp_res_a_gpu + length, cb1->a, cb2->a, n, nBits, length); cudaCheckErrors("AND: Here - 1"); for (int i = 0; i < nTotalInputBits; ++i) { temp_res_b_cpu[i] = mulValXor * (ca1->b[i] + ca2->b[i]) + XorConst; //for and temp_res_b_cpu[i + nTotalInputBits] = mulValXor * (cb1->b[i] + cb2->b[i]) + XorConst;// for xor } bootstrapAndKeySwitch_n_Bit(result, temp_res_a_gpu, temp_res_b_cpu, nTotalOutputBits, cudaBkFFTCoalesceExt, ks_a_gpu_extendedPtr, ks_b_gpu_extendedPtr); cudaCheckErrors("AND: Here - 2"); delete [] temp_res_b_cpu; cudaFree(temp_res_a_gpu); } __global__ void bootstrappingUptoBlindRotate_1_Bit_stream(int *accum_a_b, int *temp_accum_a_b, int *bara, int *testvectbis, Torus32 MU, int nBits, int *temp_res_a, int barb) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register bool L1 = (_2N - barb) < N;//a register bool L2 = id < (_2N - barb);//a; register bool L3 = id >= ((_2N - barb) - N);//a//aa; register int testvectbis_local = L1 * (L2 * (-1) + (!L2)) * MU + (!L1) * ((L3 * (-1) + (!L3)) * MU); accum_a_b[id] = testvectbis_local;//previously it was id + 1024 register bool id_lt_500 = id < 500; register int temp_res_a_id = temp_res_a[id]; temp_res_a_id = temp_res_a_id * id_lt_500; register int bara_id = modSwitchFromTorus32_GPU_device(temp_res_a_id, Nx2); bara[id] = bara_id; } __global__ void prepareForiFFT_1_Bit_stream(int *des, int *decaCoalesce, cufftDoubleReal *d_rev_in, int nBits, int *bara, int baraIndex, int *source) { register int id = blockIdx.x * blockDim.x + threadIdx.x; // register int bitIndex = (id / N) % nBits; register int threadIdModN = id % N; register int a = bara[baraIndex];//[bitIndex * N + baraIndex]; register int aa = a - N; register bool L1 = a < N, L2 = threadIdModN < a, L3 = threadIdModN < aa; int source_id = source[id], source_id_a_N = source[id - a + N], source_id_a = source[id - a]; int source_id_aa = source[id - aa], source_id_aa_N = source[id - aa + N]; register int des_id = (L1 * (L2 * (-source_id_a_N - source_id) + (!L2) * (source_id_a - source_id)) + (!L1) * (L3 * (source_id_aa_N - source_id) + (!L3) * (-source_id_aa - source_id))); // des[id] = des_id; register int p = 0; register int decal = (32 - (p + 1) * Bgbit); register uint32_t val = ((uint32_t)(des_id + offset)); register uint32_t temp1 = (val >> decal) & maskMod; register int xxxxx1 = (temp1 - halfBg); // decaCoalesce[((id / N) * N) + id] = xxxxx1; p = 1; decal = (32 - (p + 1) * Bgbit); val = ((uint32_t)(des_id + offset)); temp1 = (val >> decal) & maskMod; register int xxxxx2 = temp1 - halfBg;// + // decaCoalesce[((id / N) * N) + id + N] = xxxxx2; register int bIndex = id / N, tIndex = id % N;//, 
startIndexSmall = bIndex * N; int destTod_rev_in = bIndex * _2N + tIndex + (bIndex >= nBits) * nBits * N * 2; d_rev_in[destTod_rev_in] = xxxxx1/2.;//id;// d_rev_in[destTod_rev_in + 1024] = -xxxxx1/2.;//id;// destTod_rev_in += 2 * 1024; d_rev_in[destTod_rev_in] = xxxxx2/2.;//id; d_rev_in[destTod_rev_in + 1024] = -xxxxx2/2.;//id; } __global__ void prepareForFFT_1_Bit_Stream(cufftDoubleComplex *cuDecaFFTCoalesce, cufftDoubleComplex *tmpa_gpuCoal, cufftDoubleComplex *d_in, cufftDoubleComplex *d_rev_out, cufftDoubleComplex *bki, int keyIndex, int nBits) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register int tempId = id; int bitIndex = tempId/Ns2; register cufftDoubleComplex v0 = d_rev_out[2 * tempId + 1 + bitIndex];//d_rev_out[2 * id + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v0; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register cufftDoubleComplex v1 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v1; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register cufftDoubleComplex v2 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v2; tempId = tempId + (Ns2 * nBits); bitIndex = (tempId)/Ns2; register cufftDoubleComplex v3 = d_rev_out[2 * tempId + 1 + bitIndex]; // cuDecaFFTCoalesce[tempId] = v3; register int keySI = keyIndex * (k + 1) * kpl * Ns2, aID, bID, offset; int i = 0; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; cufftDoubleComplex bki_aid = bki[aID]; cufftDoubleComplex bki_bid = bki[bID]; cufftDoubleComplex temp_a0 = cuCmul(v0, bki_aid); cufftDoubleComplex temp_b0 = cuCmul(v0, bki_bid); i = 1; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a1 = cuCmul(v1, bki_aid); cufftDoubleComplex temp_b1 = cuCmul(v1, bki_bid); i = 2; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a2 = cuCmul(v2, bki_aid); cufftDoubleComplex temp_b2 = cuCmul(v2, bki_bid); i = 3; offset = i * Ns2; aID = keySI + offset + id % Ns2; bID = keySI + offset + id % Ns2 + Ns2 * kpl; bki_aid = bki[aID]; bki_bid = bki[bID]; cufftDoubleComplex temp_a3 = cuCmul(v3, bki_aid); cufftDoubleComplex temp_b3 = cuCmul(v3, bki_bid); cufftDoubleComplex tmpa_gpuCoal0; tmpa_gpuCoal0.x = temp_a0.x + temp_a1.x +temp_a2.x +temp_a3.x; tmpa_gpuCoal0.y = temp_a0.y + temp_a1.y +temp_a2.y +temp_a3.y; // tmpa_gpuCoal[id] = tmpa_gpuCoal0; cufftDoubleComplex tmpa_gpuCoal1; tmpa_gpuCoal1.x = temp_b0.x + temp_b1.x +temp_b2.x +temp_b3.x; tmpa_gpuCoal1.y = temp_b0.y + temp_b1.y +temp_b2.y +temp_b3.y; // tmpa_gpuCoal[nBits * Ns2 + id] = tmpa_gpuCoal1; register int largeSI = 0;//(id / Ns2) * (N + 1); // register int tid = id % Ns2; d_in[largeSI + 2 * id + 1] = tmpa_gpuCoal0; largeSI = (N + 1); d_in[largeSI + 2 * id + 1] = tmpa_gpuCoal1; } __global__ void finishUpFFT_1_Bit_Stream(int *temp2, cufftDoubleReal *d_out, int *temp3) { register int id = blockIdx.x*blockDim.x+threadIdx.x; register double _2p32 = double(INT64_C(1) << 32); register double _1sN = double(1) / double(N); register int bitIndex = id / N; register int tIndex = id % N; register int startIndexLarge = bitIndex * _2N; int temp3_id = temp3[id]; int temp2_id = Torus32(int64_t(d_out[startIndexLarge + tIndex] * _1sN * _2p32)) + temp3_id;// temp2[id] = temp2_id; } __global__ void extract_gpu_1_Bit_Stream(int *destination, int *source) { register 
int id = blockIdx.x * blockDim.x + threadIdx.x; register int s_id, des_id; register bool L1 = id == 0; s_id = L1 * id + (!L1) * (N - id); des_id = source[s_id]; des_id = L1 * des_id + (!L1) * (-des_id) + 32768; destination[id] = des_id; } __global__ void getAibarCoalesce_1_Bit_Stream(uint32_t *d_aibar, Torus32 *ai, int32_t prec_offset) { register int id = blockIdx.x * blockDim.x + threadIdx.x; register int ai_id = ai[id]; ai_id += prec_offset; d_aibar[id] = ai_id; } __global__ void calculateAijFromAibarCoalesce_1_Bit_Stream(uint32_t *aij, uint32_t *aibar, int t, int basebit, int mask) { int id = blockIdx.x * blockDim.x + threadIdx.x; int i = id / t; int j = id % t; int aibar_i = aibar[i]; aij[id] = (aibar_i >> (32 - (j + 1) * basebit)) & mask; } __global__ void lweKeySwitchVectorSubstraction_gpu_testing_coalesce_1_Bit_Stream(int *destinationA, Torus32 *sourceA, uint32_t *d_aij, int *destinationB, int *sourceB, int ks_n, int ks_t, int ks_base, int n, int params_n) { register int id = blockIdx.x * blockDim.x + threadIdx.x; int desB = destinationB[0]; register int desAid = destinationA[id]; register int A = ks_n, B = ks_t, C = ks_base, D = params_n; #pragma unroll for (int i = 0; i < 1024; ++i) {//n #pragma unroll for (int j = 0; j < 8; ++j) {//ks_t int sI2 = i * ks_t + j; register int aij = d_aij[sI2]; register int sa_id = sourceA[i * B * C * D + j * C * D + aij * D + id]; desAid -= sa_id; int bi = d_aij[sI2 + id]; int sb_id = sourceB[i * B * C + j * C + bi]; desB -= sb_id; } } destinationA[id] = desAid; if(id < 1) { destinationB[0] = desB; } } EXPORT void bootsAND_fullGPU_1_Bit_Stream(LweSample_16 *result, const LweSample_16 *ca, const LweSample_16 *cb, int nBits, cufftDoubleComplex *cudaBkFFTCoalesceExt, Torus32 *ks_a_gpu_extendedPtr, Torus32 *ks_b_gpu_extendedPtr) { register int n = 500, nTHREADS = 1024, N = 1024, _2N = 2048, Ns2 = 512, k = 1, kpl = 4, l = 2, offset = 2149580800, halfBg = 512, maskMod = 1023; static const Torus32 MU = modSwitchToTorus32(1, 8); //compute: (0,-1/8) + ca + cb static const Torus32 AndConst = modSwitchToTorus32(-1, 8); int *temp_res_a, *temp_res_b; cudaMalloc(&temp_res_a, n * nBits * sizeof(Torus32)); temp_res_b = new Torus32[nBits]; register int length = 500 * nBits; int nBLOCKS = (int) ceil((float) (length) / nTHREADS); vecAdd<<<nBLOCKS, nTHREADS>>>(temp_res_a, ca->a, cb->a, length); for (int i = 0; i < nBits; ++i) { temp_res_b[i] = ca->b[i] + cb->b[i] + AndConst; temp_res_b[i] = modSwitchFromTorus32(temp_res_b[i], _2N); } //create streams cudaDeviceProp cProfile; cudaGetDeviceProperties(&cProfile, 0); int nSM = cProfile.multiProcessorCount; cout << "#SM: " << nSM << endl; //20 cudaStream_t streams[nSM]; for (int i = 0; i < nSM; ++i) {//nSM cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking); } //bootstrapping woks uptoFFT int *accum_a_b, *bara, *temp_accum_a_b;//, *barb, *testvectbis;//accum a and accum b together; bara; tempaccum for mux rotate cudaMalloc(&accum_a_b, nBits * N * (k + 1) * sizeof(int)); cudaMalloc(&temp_accum_a_b, nBits * N * (k + 1) * sizeof(int)); cudaMalloc(&bara, nBits * N * sizeof(int)); cudaMemset(accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); cudaMemset(temp_accum_a_b, 0, nBits * N * (k + 1) * sizeof(int)); cudaMemset(bara, 0, nBits * N * sizeof(int)); for (int i = 0; i < nBits; ++i) { int sI = i * 1024 * (k + 1); int si = i * 1024; bootstrappingUptoBlindRotate_1_Bit_stream<<<1, nTHREADS, 0, streams[i % 20]>>> (accum_a_b + sI + 1024, NULL,//temp_accum_a_b + sI, bara + si, NULL, MU, 1, temp_res_a + i * 500, 
temp_res_b[i]); } for (int i = 0; i < 20; ++i) { cudaStreamSynchronize(streams[i]); } int *decaCoalesce; cudaMalloc(&decaCoalesce, nBits * N * kpl * sizeof(int));//1024*4 cufftDoubleComplex *cuDecaFFTCoalesce; cudaMalloc(&cuDecaFFTCoalesce, nBits * kpl * Ns2 * sizeof(cufftDoubleComplex));//512*4 cufftDoubleComplex *tmpa_gpuCoal; cudaMalloc(&tmpa_gpuCoal, nBits * Ns2 * sizeof(cufftDoubleComplex) * (k + 1));//512*2 //fft variables int iFFTBatch = nBits * kpl;//64 int FFTBatch = nBits * (k + 1);//32 //cufft helper variables cufftDoubleReal* d_rev_in; cufftDoubleComplex *d_rev_out; cufftDoubleComplex *d_in; cufftDoubleReal *d_out; //cufft plans cufftHandle p; cufftHandle rev_p; //ifft variables allocation cudaMalloc(&d_rev_in, iFFTBatch * _2N * sizeof(cufftDoubleReal)); cudaMalloc(&d_rev_out, iFFTBatch * (N + 1) * sizeof(cufftDoubleComplex)); cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, iFFTBatch);// - (iFFTBatch / dParts)); cudaMemset(d_rev_in, 0, iFFTBatch * _2N * sizeof(cufftDoubleReal)); //fft variables allocation cudaMalloc(&d_in, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex)); cudaMalloc(&d_out, FFTBatch * _2N * sizeof(cufftDoubleReal)); cufftPlan1d(&p, _2N, CUFFT_Z2D, FFTBatch); cudaMemset(d_in, 0, FFTBatch * (N + 1) * sizeof(cufftDoubleComplex)); int *temp2 = temp_accum_a_b; int *temp3 = accum_a_b; for (int j = 0; j < 500; ++j) { // nBLOCKS = nBits * (k + 1);//as accum is of (k + 1) * 1024; for (int i = 0; i < nBits; ++i) { int tLweSampleStart = i * (k + 1) * N; int baraStart = i * N; int dCoalesceStart = i * kpl * N; int d_rev_inStart = i * kpl * _2N; prepareForiFFT_1_Bit_stream<<<2, nTHREADS, 0, streams[i % nSM]>>> (temp2 + tLweSampleStart, decaCoalesce + dCoalesceStart, d_rev_in + d_rev_inStart, 1,//nBits, bara + baraStart, j, temp3 + tLweSampleStart); } for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } cufftExecD2Z(rev_p, d_rev_in, d_rev_out); cudaDeviceSynchronize(); for (int i = 0; i < nBits; ++i) { int cuDecaFFTCoalesceStart = i * kpl * Ns2; int d_rev_outStart = i * kpl * (N + 1); int tmpa_gpuCoalStart = i * Ns2 * (k + 1); int d_inStart = i * (N + 1) * (k + 1); prepareForFFT_1_Bit_Stream<<<1, 512, 0, streams[i % nSM]>>> (cuDecaFFTCoalesce + cuDecaFFTCoalesceStart, tmpa_gpuCoal + tmpa_gpuCoalStart, d_in + d_inStart, d_rev_out + d_rev_outStart, cudaBkFFTCoalesceExt, j, 1); } for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } cufftExecZ2D(p, d_in, d_out); cudaDeviceSynchronize(); for (int i = 0; i < nBits; ++i) { int tlweSampleStart = i * (k + 1) * N; int d_outStart = i * (k + 1) * _2N; finishUpFFT_1_Bit_Stream<<<2, nTHREADS, 0, streams[i % nSM]>>> (temp2 + tlweSampleStart, d_out + d_outStart, temp3 + tlweSampleStart); } for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } swap(temp2, temp3); } //extract int *u_a, *u_b, *temp_u_b; cudaMalloc(&u_a, nBits * N * sizeof(int)); u_b = new int[nBits]; temp_u_b = new int[nBits * N * (k + 1)]; cudaMemcpy(temp_u_b, accum_a_b, nBits * N * (k + 1) * sizeof(int), D2H); for (int i = 0; i < nBits; ++i) { int accum_a_bStart = i * N * (k + 1); int u_aStart = i * N; extract_gpu_1_Bit_Stream<<<1, 1024, 0, streams[i % nSM]>>> (u_a + u_aStart, accum_a_b + accum_a_bStart); u_b[i] = temp_u_b[accum_a_bStart + N]; } int *result_b_gpu; cudaMalloc(&result_b_gpu, nBits * sizeof(int)); cudaMemset(result->a, 0, nBits * 500 * sizeof(int)); cudaMemcpy(result_b_gpu, u_b, nBits * sizeof(int), H2D); for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } //key switch const int ks_n = 1024, ks_basebit = 2, 
ks_t = 8, ks_out_params_n = 500; const int base = 1 << ks_basebit;// base=2 in [CGGI16] const int32_t prec_offset = 1 << (32 - (1 + ks_basebit * ks_t)); //precision const int mask = base - 1; /* // int coal_d_aibarSize = nBits * ks_n;//16*1024 // uint32_t *coal_d_aibar; // cudaMalloc(&coal_d_aibar, coal_d_aibarSize * sizeof(uint32_t)); // // for (int i = 0; i < nBits; ++i) { // int coal_d_aibarStart = i * N; // int u_aStart = i * N; // getAibarCoalesce_1_Bit_Stream<<<1, 1024, 0, streams[i % nSM]>>> // (coal_d_aibar + coal_d_aibarStart, // u_a + u_aStart, // prec_offset); // } // // for (int i = 0; i < nSM; ++i) { // cudaStreamSynchronize(streams[i]); // }*/ int coal_d_aijSize = ks_n * ks_t * nBits; uint32_t *coal_d_aij; cudaMalloc(&coal_d_aij, coal_d_aijSize * sizeof(uint32_t)); for (int i = 0; i < nBits; ++i) { int coal_d_aijStart = i * ks_n * ks_t; // int coal_d_aibarStart = i * ks_n; int u_aStart = i * ks_n;//1024 calculateAijFromAibarCoalesce_1_Bit_Stream<<<8, 1024, 0, streams[i % nSM]>>> (coal_d_aij + coal_d_aijStart, (uint32_t*)u_a + u_aStart, ks_t, ks_basebit, mask); } for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } for (int i = 0; i < nBits; ++i) { int res_aStart = i * 500; int coal_d_aijStart = i * ks_n * ks_t; int result_b_gpuStart = i; lweKeySwitchVectorSubstraction_gpu_testing_coalesce_1_Bit_Stream<<<1, 500, 0, streams[i % nSM]>>> (result->a + res_aStart, ks_a_gpu_extendedPtr, coal_d_aij + coal_d_aijStart, result_b_gpu + i, ks_b_gpu_extendedPtr, ks_n, ks_t, base, 1024, 500); } for (int i = 0; i < nSM; ++i) { cudaStreamSynchronize(streams[i]); } cudaMemcpy(result->b, result_b_gpu, nBits * sizeof(int), D2H); // int *h_res_a = new int[nBits * 500]; // cudaMemcpy(h_res_a, result->a, nBits * 500 * sizeof(int), D2H); // for (int i = 0; i < nBits; ++i) { // int sI = i * 500; // for (int j = 0; j < 10; ++j) { // cout << h_res_a[sI + j] << " "; // } // cout << endl; // } // cout << endl; delete [] temp_res_b; cudaFree(temp_res_a); cudaFree(accum_a_b); cudaFree(temp_accum_a_b); cudaFree(bara); // cudaFree(barb); // cudaFree(testvectbis); cudaFree(decaCoalesce); cudaFree(cuDecaFFTCoalesce); cudaFree(tmpa_gpuCoal); //cufft helper variables cudaFree(d_rev_in); cudaFree(d_rev_out); cudaFree(d_in); cudaFree(d_out); cufftDestroy(rev_p); cufftDestroy(p); cudaFree(u_a); free(u_b); free(temp_u_b); cudaFree(result_b_gpu); // cudaFree(coal_d_aibar); for (int i = 0; i < nSM; ++i) { //nSM cudaStreamDestroy(streams[i]); } }
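/*
 * The gate and bootstrapping code above leans on CudaSafeCall(...) and cudaCheckErrors(...)
 * at every step, but their definitions are not part of this excerpt. The sketch below is only
 * an illustration of what such wrappers commonly look like -- the names, messages and abort
 * behaviour are an assumption, not the project's actual helpers. It uses nothing beyond the
 * standard CUDA runtime API (cudaError_t, cudaGetLastError, cudaGetErrorString).
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file/line information if a runtime call returned an error.
#define EXAMPLE_CUDA_SAFE_CALL(call)                                          \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err__), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Check for a pending asynchronous error (e.g. from a kernel launch) and tag it with a message.
#define EXAMPLE_CUDA_CHECK_ERRORS(msg)                                        \
    do {                                                                      \
        cudaError_t err__ = cudaGetLastError();                               \
        if (err__ != cudaSuccess) {                                           \
            fprintf(stderr, "%s: %s\n", (msg), cudaGetErrorString(err__));    \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)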
afb98b4ab1813ede714890b4f6a956a08757f0a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Purpose: Time square matrix multiplication usingh CPU, GPU, cblas ddot, cblas daxpy, cublas ddot, cublas daxpy. * Author: Gurpal Singh * Date: April 6, 2017 * ME 571 Project 2 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <time.h> #include "timer.h" #include "gsl_cblas.h" #include <rocblas.h> #include "cpumatrixmultiply.h" #include "cpuddot.h" #include "cpudaxpy.h" #include "gpumatrixmultiply.h" #include "gpuddot.h" #include "gpudaxpy.h" int main(void){ //Initializing matrix Dimensions and scanning for values from user int m, n, k; printf("This program performs square matrix multiplication where A is 'm x n' and B is 'n x k'.\n"); //Setting Matrix Dimensions printf("Enter the integer value for n: "); scanf("%d", &n); m = n; k = n; //Dynamic Memory Allocation for Matrices (I used flat arrays) //Matrix A double *a; hipMallocManaged( &a, m * n * sizeof(double)); //Matrix B double *b; hipMallocManaged( &b, n * k * sizeof(double)); //Matrix C double *c; hipMallocManaged( &c, m * k * sizeof(double)); //Initializing nmax and i and j used for loops int nmax = 19; int i, j; time_t t; srand( (unsigned) t ); //Setting random values in Matrix A for (i = 0; i < m; i++){ for (j = 0; j < n; j++){ a[i*n+j] = rand() % (nmax+1); } } //Setting random values in Matrix B for (i = 0; i < n; i++){ for (j = 0; j < k; j++){ b[i*k+j] = rand() % (nmax+1); } } //Transposing and Storing as Column Major arrays double *acol; hipMallocManaged( &acol,m * n * sizeof(double)); for(i = 0; i < n; i++){ for(j = 0; j < m; j++){ acol[i*m+j] = a[j*n+i]; } } double *bcol; hipMallocManaged( &bcol,k * n * sizeof(double)); for(int i = 0; i < k; i++){ for( j = 0; j < n; j++){ bcol[i*n+j] = b[j*k+i]; } } //Timing the CPU Matrix Multiplication Method StartTimer(); CPU_Matrix_Multiply(m, n, k, a, b, c); double CPU_time = GetTimer(); CPU_time = CPU_time*1000; //Converting to ms //Timing the CPU Matrix Multiplication using cblas ddot method StartTimer(); CPU_ddot(m, n, k, a, bcol, c); double CPU_ddot_time = GetTimer(); CPU_ddot_time = CPU_ddot_time*1000; //Converting to ms //Timing the CPU Matrix Multiplication using cblas daxpy StartTimer(); CPU_daxpy(m, n, k, acol, bcol, c); double CPU_daxpy_time = GetTimer(); CPU_daxpy_time = CPU_daxpy_time*1000; //Parallel GPU Code Block and Grid Dimensions dim3 block(16,16); dim3 grid( (n+15)/16, (n+15)/16 ); //Timing the GPU Matrix Multiplication Kernel hipEvent_t timeStart, timeStop; hipEventCreate(&timeStart); hipEventCreate(&timeStop); float elapsedTime; //Has to be type float units ms hipEventRecord(timeStart, 0); hipLaunchKernelGGL(( GPU_Matrix_Multiply_Kernel), dim3(grid), dim3(block), 0, 0, a, b, c, n); hipDeviceSynchronize(); hipEventRecord(timeStop,0); hipEventSynchronize(timeStop); hipEventElapsedTime(&elapsedTime, timeStart, timeStop); hipEventDestroy(timeStart); hipEventDestroy(timeStop); //Timing the GPU cublas DDOT Matrix Multiplication Method StartTimer(); GPU_ddot(m, n, k, a, bcol, c); double GPU_ddot_time = GetTimer(); GPU_ddot_time = GPU_ddot_time*1000; //Timing the GPU cublas DAXPY Matrix Multiplication Method StartTimer(); GPU_daxpy(m, n, k, acol, bcol, c); double GPU_daxpy_time = GetTimer(); GPU_daxpy_time = GPU_daxpy_time*1000; //Writing The results to a file FILE *fptr = fopen("Matrix_Multiply_Results.txt", "a+"); if (fptr == NULL) { printf("Error!"); exit(1); } fprintf(fptr, "\n"); fprintf(fptr, "Matrix Size: %d\n", n); fprintf(fptr, 
"elapsed wall time CPU Matrix Multiplication = %.3f ms\n", CPU_time); fprintf(fptr, "elapsed wall time CPU cblas DDOT %.3f ms\n", CPU_ddot_time); fprintf(fptr, "elapsed wall time CPU cblas DAXPY %.3f ms\n",CPU_daxpy_time); fprintf(fptr, "elapsed wall time GPU Matrix Multiplication %.3f ms\n", elapsedTime); fprintf(fptr, "elapsed wall time GPU cublas DDOT %.3f ms\n", GPU_ddot_time); fprintf(fptr, "elapsed wall time GPU cublas DAXPY %.3f ms\n", GPU_daxpy_time); fclose(fptr); //Clean Up hipFree(a); hipFree(b); hipFree(c); hipFree(acol); hipFree(bcol); }
afb98b4ab1813ede714890b4f6a956a08757f0a0.cu
/* * Purpose: Time square matrix multiplication usingh CPU, GPU, cblas ddot, cblas daxpy, cublas ddot, cublas daxpy. * Author: Gurpal Singh * Date: April 6, 2017 * ME 571 Project 2 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/resource.h> #include <time.h> #include "timer.h" #include "gsl_cblas.h" #include <cublas.h> #include "cpumatrixmultiply.h" #include "cpuddot.h" #include "cpudaxpy.h" #include "gpumatrixmultiply.h" #include "gpuddot.h" #include "gpudaxpy.h" int main(void){ //Initializing matrix Dimensions and scanning for values from user int m, n, k; printf("This program performs square matrix multiplication where A is 'm x n' and B is 'n x k'.\n"); //Setting Matrix Dimensions printf("Enter the integer value for n: "); scanf("%d", &n); m = n; k = n; //Dynamic Memory Allocation for Matrices (I used flat arrays) //Matrix A double *a; cudaMallocManaged( &a, m * n * sizeof(double)); //Matrix B double *b; cudaMallocManaged( &b, n * k * sizeof(double)); //Matrix C double *c; cudaMallocManaged( &c, m * k * sizeof(double)); //Initializing nmax and i and j used for loops int nmax = 19; int i, j; time_t t; srand( (unsigned) t ); //Setting random values in Matrix A for (i = 0; i < m; i++){ for (j = 0; j < n; j++){ a[i*n+j] = rand() % (nmax+1); } } //Setting random values in Matrix B for (i = 0; i < n; i++){ for (j = 0; j < k; j++){ b[i*k+j] = rand() % (nmax+1); } } //Transposing and Storing as Column Major arrays double *acol; cudaMallocManaged( &acol,m * n * sizeof(double)); for(i = 0; i < n; i++){ for(j = 0; j < m; j++){ acol[i*m+j] = a[j*n+i]; } } double *bcol; cudaMallocManaged( &bcol,k * n * sizeof(double)); for(int i = 0; i < k; i++){ for( j = 0; j < n; j++){ bcol[i*n+j] = b[j*k+i]; } } //Timing the CPU Matrix Multiplication Method StartTimer(); CPU_Matrix_Multiply(m, n, k, a, b, c); double CPU_time = GetTimer(); CPU_time = CPU_time*1000; //Converting to ms //Timing the CPU Matrix Multiplication using cblas ddot method StartTimer(); CPU_ddot(m, n, k, a, bcol, c); double CPU_ddot_time = GetTimer(); CPU_ddot_time = CPU_ddot_time*1000; //Converting to ms //Timing the CPU Matrix Multiplication using cblas daxpy StartTimer(); CPU_daxpy(m, n, k, acol, bcol, c); double CPU_daxpy_time = GetTimer(); CPU_daxpy_time = CPU_daxpy_time*1000; //Parallel GPU Code Block and Grid Dimensions dim3 block(16,16); dim3 grid( (n+15)/16, (n+15)/16 ); //Timing the GPU Matrix Multiplication Kernel cudaEvent_t timeStart, timeStop; cudaEventCreate(&timeStart); cudaEventCreate(&timeStop); float elapsedTime; //Has to be type float units ms cudaEventRecord(timeStart, 0); GPU_Matrix_Multiply_Kernel<<<grid, block>>> (a, b, c, n); cudaDeviceSynchronize(); cudaEventRecord(timeStop,0); cudaEventSynchronize(timeStop); cudaEventElapsedTime(&elapsedTime, timeStart, timeStop); cudaEventDestroy(timeStart); cudaEventDestroy(timeStop); //Timing the GPU cublas DDOT Matrix Multiplication Method StartTimer(); GPU_ddot(m, n, k, a, bcol, c); double GPU_ddot_time = GetTimer(); GPU_ddot_time = GPU_ddot_time*1000; //Timing the GPU cublas DAXPY Matrix Multiplication Method StartTimer(); GPU_daxpy(m, n, k, acol, bcol, c); double GPU_daxpy_time = GetTimer(); GPU_daxpy_time = GPU_daxpy_time*1000; //Writing The results to a file FILE *fptr = fopen("Matrix_Multiply_Results.txt", "a+"); if (fptr == NULL) { printf("Error!"); exit(1); } fprintf(fptr, "\n"); fprintf(fptr, "Matrix Size: %d\n", n); fprintf(fptr, "elapsed wall time CPU Matrix Multiplication = %.3f ms\n", CPU_time); fprintf(fptr, "elapsed wall time CPU cblas 
DDOT %.3f ms\n", CPU_ddot_time); fprintf(fptr, "elapsed wall time CPU cblas DAXPY %.3f ms\n",CPU_daxpy_time); fprintf(fptr, "elapsed wall time GPU Matrix Multiplication %.3f ms\n", elapsedTime); fprintf(fptr, "elapsed wall time GPU cublas DDOT %.3f ms\n", GPU_ddot_time); fprintf(fptr, "elapsed wall time GPU cublas DAXPY %.3f ms\n", GPU_daxpy_time); fclose(fptr); //Clean Up cudaFree(a); cudaFree(b); cudaFree(c); cudaFree(acol); cudaFree(bcol); }
c3d57c6e88b2fad792f97d46ea1fa7f0b09222ab.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" pluginStatus_t RPROIInferenceFused(hipStream_t stream, const int N, const int A, const int C, const int H, const int W, const int poolingH, const int poolingW, const int featureStride, const int preNmsTop, const int nmsMaxOut, const float iouThreshold, const float minBoxSize, const float spatialScale, const float* imInfo, const float* anchors, const DataType t_scores, const DLayout_t l_scores, const void* scores, const DataType t_deltas, const DLayout_t l_deltas, const void* deltas, const DataType t_featureMap, const DLayout_t l_featureMap, const void* featureMap, void* workspaces, const DataType t_rois, void* rois, const DataType t_top, const DLayout_t l_top, void* top, size_t deviceSmemSize) { if (imInfo == NULL || anchors == NULL || scores == NULL || deltas == NULL || featureMap == NULL || workspaces == NULL || rois == NULL || top == NULL) { return STATUS_BAD_PARAM; } pluginStatus_t status; // Region proposal inference // Getting the region of interests (ROIs) bounding box coordinates from region proposals using non maximum suppression (NMS) status = proposalsInference(stream, N, A, H, W, featureStride, preNmsTop, nmsMaxOut, iouThreshold, minBoxSize, imInfo, anchors, t_scores, l_scores, scores, t_deltas, l_deltas, deltas, workspaces, t_rois, rois); ASSERT_FAILURE(status == STATUS_SUCCESS); // ROI inference // ROI pooling for ROIs status = roiInference(stream, N * nmsMaxOut, // TOTAL number of rois -> ~nmsMaxOut * N N, // Batch size C, // Channels H, // Input feature map H W, // Input feature map W poolingH, // Output feature map H poolingW, // Output feature map W spatialScale, t_rois, rois, t_featureMap, l_featureMap, featureMap, t_top, l_top, top, deviceSmemSize); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } size_t RPROIInferenceFusedWorkspaceSize(int N, int A, int H, int W, int nmsMaxOut) { return proposalsInferenceWorkspaceSize(N, A, H, W, nmsMaxOut); }
c3d57c6e88b2fad792f97d46ea1fa7f0b09222ab.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" pluginStatus_t RPROIInferenceFused(cudaStream_t stream, const int N, const int A, const int C, const int H, const int W, const int poolingH, const int poolingW, const int featureStride, const int preNmsTop, const int nmsMaxOut, const float iouThreshold, const float minBoxSize, const float spatialScale, const float* imInfo, const float* anchors, const DataType t_scores, const DLayout_t l_scores, const void* scores, const DataType t_deltas, const DLayout_t l_deltas, const void* deltas, const DataType t_featureMap, const DLayout_t l_featureMap, const void* featureMap, void* workspaces, const DataType t_rois, void* rois, const DataType t_top, const DLayout_t l_top, void* top, size_t deviceSmemSize) { if (imInfo == NULL || anchors == NULL || scores == NULL || deltas == NULL || featureMap == NULL || workspaces == NULL || rois == NULL || top == NULL) { return STATUS_BAD_PARAM; } pluginStatus_t status; // Region proposal inference // Getting the region of interests (ROIs) bounding box coordinates from region proposals using non maximum suppression (NMS) status = proposalsInference(stream, N, A, H, W, featureStride, preNmsTop, nmsMaxOut, iouThreshold, minBoxSize, imInfo, anchors, t_scores, l_scores, scores, t_deltas, l_deltas, deltas, workspaces, t_rois, rois); ASSERT_FAILURE(status == STATUS_SUCCESS); // ROI inference // ROI pooling for ROIs status = roiInference(stream, N * nmsMaxOut, // TOTAL number of rois -> ~nmsMaxOut * N N, // Batch size C, // Channels H, // Input feature map H W, // Input feature map W poolingH, // Output feature map H poolingW, // Output feature map W spatialScale, t_rois, rois, t_featureMap, l_featureMap, featureMap, t_top, l_top, top, deviceSmemSize); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } size_t RPROIInferenceFusedWorkspaceSize(int N, int A, int H, int W, int nmsMaxOut) { return proposalsInferenceWorkspaceSize(N, A, H, W, nmsMaxOut); }
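/*
 * Minimal caller-side sketch (not part of the plugin source): the workspace handed to
 * RPROIInferenceFused must be at least RPROIInferenceFusedWorkspaceSize(...) bytes, so a
 * caller would typically size and allocate it as below. The helper name and parameter
 * values are placeholders for illustration only.
 */
static void *exampleAllocRproiWorkspace(int N, int A, int H, int W, int nmsMaxOut) {
    size_t wsSize = RPROIInferenceFusedWorkspaceSize(N, A, H, W, nmsMaxOut);
    void *workspace = nullptr;
    if (cudaMalloc(&workspace, wsSize) != cudaSuccess) {
        return nullptr;  // caller must handle allocation failure
    }
    return workspace;    // release with cudaFree() once RPROIInferenceFused has completed
}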
229ead661309d78924d4cb0afdab056d783d2881.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2012-2013 The Ohio State University. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "common.h" #include "gpuCudaLib.h" #include <assert.h> #include <hip/hip_runtime.h> #include <float.h> #include <limits.h> #include <stdio.h> #ifdef HAS_GMM #include "gmm.h" #endif #define SHARED_SIZE_LIMIT 1024 __device__ static int gpu_strcmp(const char *s1, const char *s2, int len) { int res = 0; for (int i = 0; i < len; i++) { if (s1[i] < s2[i]) { res = -1; break; } else if (s1[i] > s2[i]) { res = 1; break; } } return res; } /* use one GPU thread to count the number of unique key */ extern "C" __global__ void count_unique_keys_int(int *key, int tupleNum, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (key[i + 1] != key[i]) res++; } *result = res; } extern "C" __global__ void count_unique_keys_float(float *key, int tupleNum, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (key[i + 1] != key[i]) res++; } *result = res; } extern "C" __global__ void count_unique_keys_string(char *key, int tupleNum, int keySize, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) res++; } *result = res; } /* * Count the number of each key using one single GPU thread. 
*/ extern "C" __global__ void count_key_num_int(int *key, int tupleNum, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (key[i] != key[i + 1]) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (key[i] != key[i + 1]) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } extern "C" __global__ void count_key_num_float(float *key, int tupleNum, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (key[i] != key[i + 1]) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (key[i] != key[i + 1]) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } extern "C" __global__ void count_key_num_string(char *key, int tupleNum, int keySize, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } __device__ static inline void ComparatorInt(int &keyA, int &valA, int &keyB, int &valB, int dir) { int t; if ((keyA > keyB) == dir) { t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } __device__ static inline void ComparatorFloat(float &keyA, int &valA, float &keyB, int &valB, int dir) { float t1; int t2; if ((keyA > keyB) == dir) { t1 = keyA; keyA = keyB; keyB = t1; t2 = valA; valA = valB; valB = t2; } } __device__ static inline void Comparator(char *keyA, int &valA, char *keyB, int &valB, int keySize, int dir) { int t; char buf[32]; if ((gpu_strcmp(keyA, keyB, keySize) == 1) == dir) { memcpy(buf, keyA, keySize); memcpy(keyA, keyB, keySize); memcpy(keyB, buf, keySize); t = valA; valA = valB; valB = t; } } #define NTHREAD (SHARED_SIZE_LIMIT / 2) extern "C" __global__ void sort_key_string(char *key, int tupleNum, int keySize, char *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ char bufKey[SHARED_SIZE_LIMIT * 32]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; memcpy(bufKey + lid * keySize, key + gid * keySize, keySize); bufVal[lid] = gid; memcpy(bufKey + (lid + blockDim.x) * keySize, key + (gid + blockDim.x) * keySize, keySize); bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator(bufKey + pos * keySize, bufVal[pos + 0], bufKey + (pos + stride) * keySize, bufVal[pos + stride], keySize, ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator(bufKey + pos * keySize, bufVal[pos + 0], bufKey + (pos + stride) * keySize, bufVal[pos + stride], keySize, dir); } } __syncthreads(); memcpy(result + gid * keySize, bufKey + lid * keySize, keySize); ((int *)pos)[gid] = bufVal[lid]; memcpy(result + (gid + blockDim.x) * keySize, bufKey + (lid + blockDim.x) * keySize, keySize); ((int *)pos)[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Sorting small number of intergers. 
*/ extern "C" __global__ void sort_key_int(int *key, int tupleNum, int *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ int bufKey[SHARED_SIZE_LIMIT]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; bufKey[lid] = key[gid]; bufVal[lid] = gid; bufKey[lid + blockDim.x] = key[gid + blockDim.x]; bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorInt(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorInt(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], dir); } } __syncthreads(); result[gid] = bufKey[lid]; pos[gid] = bufVal[lid]; result[gid + blockDim.x] = bufKey[lid + blockDim.x]; pos[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Sorting small number of floats. */ extern "C" __global__ void sort_key_float(float *key, int tupleNum, float *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ float bufKey[SHARED_SIZE_LIMIT]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; bufKey[lid] = key[gid]; bufVal[lid] = gid; bufKey[lid + blockDim.x] = key[gid + blockDim.x]; bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorFloat(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorFloat(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], dir); } } __syncthreads(); result[gid] = bufKey[lid]; pos[gid] = bufVal[lid]; result[gid + blockDim.x] = bufKey[lid + blockDim.x]; pos[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Naive sort. One thread per block. 
*/ extern "C" __global__ void sec_sort_key_int(int *key, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { int min = key[i]; int tmp = min; int pos = i; for (int j = i + 1; j < end; j++) { if (min > key[j]) { min = key[j]; pos = j; } } key[pos] = tmp; outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } extern "C" __global__ void sec_sort_key_float(float *key, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { float min = key[i]; float tmp = min; int pos = i; for (int j = i + 1; j < end; j++) { if (min > key[j]) { min = key[j]; pos = j; } } key[pos] = tmp; outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } extern "C" __global__ void sec_sort_key_string(char *key, int keySize, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { char min[128]; char tmp[128]; memcpy(min, key + i * keySize, keySize); memcpy(tmp, key + i * keySize, keySize); int pos = i; for (int j = i + 1; j < end; j++) { if (gpu_strcmp(min, key + j * keySize, keySize) > 0) { memcpy(min, key + j * keySize, keySize); pos = j; } } memcpy(key + pos * keySize, tmp, keySize); outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } //__global__ static void sec_sort_key_int(int *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){ // int tid = blockIdx.x; // int start = psum[tid]; // int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // int min = key[i]; // int pos = i; // for(int j=i+1;j<end;j++){ // if(min > key[j]){ // min = key[j]; // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} // //__global__ static void sec_sort_key_float(float *key, int *psum, int *count ,int tupleNum, int *inputPos, int* //outputPos){ int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // float min = key[i]; // int pos = i; // for(int j=i+1;j<end;j++){ // if(min > key[j]){ // min = key[j]; // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} // //__global__ static void sec_sort_key_string(char *key, int keySize, int *psum, int *count ,int tupleNum, int *inputPos, //int* outputPos){ int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // char min[128]; // memcpy(min,key + i*keySize, keySize); // int pos = i; // for(int j=i+1;j<end;j++){ // if(gpu_strcmp(min, key+j*keySize,keySize)>0){ // memcpy(min,key + j*keySize, keySize); // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} extern "C" __global__ void set_key_string(char *key, int tupleNum) { int stride = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = CHAR_MAX; } extern "C" __global__ void set_key_int(int *key, int tupleNum) { int stride = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = INT_MAX; } extern "C" __global__ void set_key_float(float *key, int tupleNum) { int stride = blockDim.x * gridDim.x; 
int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = FLT_MAX; } /* * gather the elements from the @col into @result. */ extern "C" __global__ void gather_col_int(int *keyPos, int *col, int newNum, int tupleNum, int *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) result[i] = col[pos]; } } extern "C" __global__ void gather_col_float(int *keyPos, float *col, int newNum, int tupleNum, float *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) result[i] = col[pos]; } } extern "C" __global__ void gather_col_string(int *keyPos, char *col, int newNum, int tupleNum, int keySize, char *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) memcpy(result + i * keySize, col + pos * keySize, keySize); } } /* generate the final result*/ extern "C" __global__ void gather_result(int *keyPos, char **col, int newNum, int tupleNum, int *size, int colNum, char **result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int j = 0; j < colNum; j++) { for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) memcpy(result[j] + i * size[j], col[j] + pos * size[j], size[j]); } } } /* * orderBy: sort the input data by the order by columns * * Prerequisite: * input data are not compressed * * Input: * odNode: the groupby node which contains the input data and groupby information * pp: records the statistics such as kernel execution time * * Return: * a new table node */ struct tableNode *orderBy(struct orderByNode *odNode, struct statistic *pp) { extern char *col_buf; struct timeval t; struct tableNode *res = NULL; struct timespec start, end; clock_gettime(CLOCK_REALTIME, &start); assert(odNode->table->tupleNum < SHARED_SIZE_LIMIT); res = (struct tableNode *)malloc(sizeof(struct tableNode)); CHECK_POINTER(res); res->tupleNum = odNode->table->tupleNum; res->totalAttr = odNode->table->totalAttr; res->tupleSize = odNode->table->tupleSize; res->attrType = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrType); res->attrSize = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrSize); res->attrTotalSize = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrTotalSize); res->dataPos = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->dataPos); res->dataFormat = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->dataFormat); res->content = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(res->content); int gpuTupleNum = odNode->table->tupleNum; char *gpuKey, **column, **gpuContent; char *gpuSortedKey; int *gpuSize, *gpuPos; column = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(column); #ifdef HAS_GMM CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * res->totalAttr, FLAG_PTARRAY)); #else CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuContent, sizeof(char *) * res->totalAttr)); #endif for (int i = 0; i < res->totalAttr; i++) { res->attrType[i] = odNode->table->attrType[i]; res->attrSize[i] = odNode->table->attrSize[i]; res->attrTotalSize[i] = 
odNode->table->attrTotalSize[i]; res->dataPos[i] = MEM; res->dataFormat[i] = UNCOMPRESSED; res->content[i] = (char *)malloc(res->attrSize[i] * res->tupleNum); CHECK_POINTER(res->content[i]); int attrSize = res->attrSize[i]; if (odNode->table->dataPos[i] == MEM) { CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&column[i], attrSize * res->tupleNum)); gettimeofday(&t, NULL); // printf("[gvm] %lf intercepting diskIO\n", t.tv_sec + t.tv_usec / 1000000.0); memcpy(col_buf, odNode->table->content[i], attrSize * res->tupleNum); gettimeofday(&t, NULL); // printf("[gvm] %lf intercepted diskIO\n", t.tv_sec + t.tv_usec / 1000000.0); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(column[i], col_buf, attrSize * res->tupleNum, hipMemcpyHostToDevice)); } else if (odNode->table->dataPos[i] == GPU) { column[i] = odNode->table->content[i]; } CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &column[i], sizeof(char *), hipMemcpyHostToDevice)); } int newNum = 1; while (newNum < gpuTupleNum) { newNum *= 2; } CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuPos, sizeof(int) * newNum)); CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSize, sizeof(int) * res->totalAttr)); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuSize, res->attrSize, sizeof(int) * res->totalAttr, hipMemcpyHostToDevice);); char **gpuResult; char **result; result = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(result); #ifdef HAS_GMM CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuResult, sizeof(char *) * res->totalAttr, FLAG_PTARRAY)); #else CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuResult, sizeof(char *) * res->totalAttr)); #endif for (int i = 0; i < res->totalAttr; i++) { CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&result[i], res->attrSize[i] * gpuTupleNum)); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuResult[i], &result[i], sizeof(char *), hipMemcpyHostToDevice)); } /* Sort by the first orderby column first */ int dir; if (odNode->orderBySeq[0] == ASC) dir = 1; else dir = 0; int index = odNode->orderByIndex[0]; int type = odNode->table->attrType[index]; printf("type: %s\n", type == INT ? "int" : (type == FLOAT ? 
"float" : "string")); if (type == INT) { CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, sizeof(int) * newNum)); CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, sizeof(int) * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(119)); hipLaunchKernelGGL(( set_key_int), dim3(8), dim3(128), 0, 0, (int *)gpuKey, newNum); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], sizeof(int) * gpuTupleNum, hipMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(123)); hipLaunchKernelGGL(( sort_key_int), dim3(1), dim3(newNum / 2), 0, 0, (int *)gpuKey, newNum, (int *)gpuSortedKey, gpuPos, dir); } else if (type == FLOAT) { CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, sizeof(float) * newNum)); CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, sizeof(float) * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(118)); hipLaunchKernelGGL(( set_key_float), dim3(8), dim3(128), 0, 0, (float *)gpuKey, newNum); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], sizeof(int) * gpuTupleNum, hipMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(122)); hipLaunchKernelGGL(( sort_key_float), dim3(1), dim3(newNum / 2), 0, 0, (float *)gpuKey, newNum, (float *)gpuSortedKey, gpuPos, dir); } else if (type == STRING) { int keySize = odNode->table->attrSize[index]; CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, keySize * newNum)); CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, keySize * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(120)); hipLaunchKernelGGL(( set_key_string), dim3(8), dim3(128), 0, 0, gpuKey, newNum * keySize); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], keySize * gpuTupleNum, hipMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(124)); hipLaunchKernelGGL(( sort_key_string), dim3(1), dim3(newNum / 2), 0, 0, gpuKey, newNum, keySize, gpuSortedKey, gpuPos, dir); } /* Currently we only support no more than 2 orderBy columns */ if (odNode->orderByNum == 2) { int keySize = odNode->table->attrSize[index]; int secIndex = odNode->orderByIndex[1]; int keySize2 = odNode->table->attrSize[secIndex]; int secType = odNode->table->attrType[secIndex]; int *keyNum, *keyCount, *keyPsum; CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&keyNum, sizeof(int))); if (type == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(21)); hipLaunchKernelGGL(( count_unique_keys_int), dim3(1), dim3(1), 0, 0, (int *)gpuSortedKey, gpuTupleNum, keyNum); } else if (type == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(20)); hipLaunchKernelGGL(( count_unique_keys_float), dim3(1), dim3(1), 0, 0, (float *)gpuSortedKey, gpuTupleNum, keyNum); } else if (type == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(22)); hipLaunchKernelGGL(( count_unique_keys_string), dim3(1), dim3(1), 0, 0, gpuSortedKey, gpuTupleNum, keySize, keyNum); } int cpuKeyNum; CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&cpuKeyNum, keyNum, sizeof(int), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&keyCount, sizeof(int) * cpuKeyNum)); 
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&keyPsum, sizeof(int) * cpuKeyNum)); CUDA_SAFE_CALL_NO_SYNC(hipMemset(keyPsum, 0, sizeof(int) * cpuKeyNum)); if (type == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(17)); hipLaunchKernelGGL(( count_key_num_int), dim3(1), dim3(1), 0, 0, (int *)gpuSortedKey, gpuTupleNum, keyCount); } else if (type == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(16)); hipLaunchKernelGGL(( count_key_num_float), dim3(1), dim3(1), 0, 0, (float *)gpuSortedKey, gpuTupleNum, keyCount); } else if (type == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(18)); hipLaunchKernelGGL(( count_key_num_string), dim3(1), dim3(1), 0, 0, gpuSortedKey, gpuTupleNum, keySize, keyCount); } scanImpl(keyCount, cpuKeyNum, keyPsum, pp); int *gpuPos2; CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuPos2, sizeof(int) * newNum)); char *gpuKey2; CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey2, keySize2 * newNum)); printf("sectype: %s\n", secType == INT ? "int" : (secType == FLOAT ? "float" : "string")); if (secType == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(27)); hipLaunchKernelGGL(( gather_col_int), dim3(8), dim3(128), 0, 0, gpuPos, (int *)column[secIndex], newNum, gpuTupleNum, (int *)gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(116)); hipLaunchKernelGGL(( sec_sort_key_int), dim3(cpuKeyNum), dim3(1), 0, 0, (int *)gpuKey2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } else if (secType == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(26)); hipLaunchKernelGGL(( gather_col_float), dim3(8), dim3(128), 0, 0, gpuPos, (float *)column[secIndex], newNum, gpuTupleNum, (float *)gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(115)); hipLaunchKernelGGL(( sec_sort_key_float), dim3(cpuKeyNum), dim3(1), 0, 0, (float *)gpuKey2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } else if (secType == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(28)); hipLaunchKernelGGL(( gather_col_string), dim3(8), dim3(128), 0, 0, gpuPos, column[secIndex], newNum, gpuTupleNum, keySize2, gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(117)); hipLaunchKernelGGL(( sec_sort_key_string), dim3(cpuKeyNum), dim3(1), 0, 0, gpuKey2, keySize2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT | CADV_PTAINPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_INPUT | CADV_PTAOUTPUT)); GMM_CALL(cudaSetFunction(29)); hipLaunchKernelGGL(( gather_result), dim3(8), dim3(128), 0, 0, gpuPos2, gpuContent, newNum, gpuTupleNum, gpuSize, res->totalAttr, 
gpuResult); CUDA_SAFE_CALL_NO_SYNC(hipFree(keyCount)); CUDA_SAFE_CALL_NO_SYNC(hipFree(keyNum)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuPos2)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuKey2)); } else { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT | CADV_PTAINPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_INPUT | CADV_PTAOUTPUT)); GMM_CALL(cudaSetFunction(29)); hipLaunchKernelGGL(( gather_result), dim3(8), dim3(128), 0, 0, gpuPos, gpuContent, newNum, gpuTupleNum, gpuSize, res->totalAttr, gpuResult); } for (int i = 0; i < res->totalAttr; i++) { int size = res->attrSize[i] * gpuTupleNum; memset(res->content[i], 0, size); CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res->content[i], result[i], size, hipMemcpyDeviceToHost)); } for (int i = 0; i < res->totalAttr; i++) { if (odNode->table->dataPos[i] == MEM) CUDA_SAFE_CALL_NO_SYNC(hipFree(column[i])); CUDA_SAFE_CALL_NO_SYNC(hipFree(result[i])); } free(column); free(result); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuKey)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuSortedKey)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuContent)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuSize)); CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuPos)); clock_gettime(CLOCK_REALTIME, &end); double timeE = (end.tv_sec - start.tv_sec) * BILLION + end.tv_nsec - start.tv_nsec; printf("OrderBy Time: %lf\n", timeE / (1000 * 1000)); return res; }
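The host path above pads gpuKey out to the next power of two and pre-fills it with sentinels (INT_MAX, FLT_MAX, or CHAR_MAX) before copying the real column in, so the fixed-size bitonic network keeps real tuples ahead of the padding. Below is a minimal standalone sketch of just that padding step, assuming a toy ascending sort; set_key_int_sketch and the sizes are hypothetical stand-ins, not functions from this file.

#include <cuda_runtime.h>
#include <limits.h>
#include <stdio.h>

__global__ void set_key_int_sketch(int *key, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        key[i] = INT_MAX;                       // padded slots sort to the end
}

int main() {
    const int tupleNum = 5;
    int newNum = 1;
    while (newNum < tupleNum) newNum *= 2;      // next power of two, here 8
    int h_key[tupleNum] = {42, 7, 19, 3, 28};
    int *d_key;
    cudaMalloc(&d_key, newNum * sizeof(int));
    set_key_int_sketch<<<8, 128>>>(d_key, newNum);                            // sentinels everywhere
    cudaMemcpy(d_key, h_key, tupleNum * sizeof(int), cudaMemcpyHostToDevice); // overwrite the real keys
    int h_padded[8];
    cudaMemcpy(h_padded, d_key, newNum * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < newNum; i++) printf("%d ", h_padded[i]);              // real keys, then three INT_MAX sentinels
    printf("\n");
    cudaFree(d_key);
    return 0;
}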
229ead661309d78924d4cb0afdab056d783d2881.cu
/* Copyright (c) 2012-2013 The Ohio State University. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "common.h" #include "gpuCudaLib.h" #include <assert.h> #include <cuda.h> #include <float.h> #include <limits.h> #include <stdio.h> #ifdef HAS_GMM #include "gmm.h" #endif #define SHARED_SIZE_LIMIT 1024 __device__ static int gpu_strcmp(const char *s1, const char *s2, int len) { int res = 0; for (int i = 0; i < len; i++) { if (s1[i] < s2[i]) { res = -1; break; } else if (s1[i] > s2[i]) { res = 1; break; } } return res; } /* use one GPU thread to count the number of unique key */ extern "C" __global__ void count_unique_keys_int(int *key, int tupleNum, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (key[i + 1] != key[i]) res++; } *result = res; } extern "C" __global__ void count_unique_keys_float(float *key, int tupleNum, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (key[i + 1] != key[i]) res++; } *result = res; } extern "C" __global__ void count_unique_keys_string(char *key, int tupleNum, int keySize, int *result) { int i = 0; int res = 1; for (i = 0; i < tupleNum - 1; i++) { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) res++; } *result = res; } /* * Count the number of each key using one single GPU thread. 
*/ extern "C" __global__ void count_key_num_int(int *key, int tupleNum, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (key[i] != key[i + 1]) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (key[i] != key[i + 1]) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } extern "C" __global__ void count_key_num_float(float *key, int tupleNum, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (key[i] != key[i + 1]) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (key[i] != key[i + 1]) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } extern "C" __global__ void count_key_num_string(char *key, int tupleNum, int keySize, int *count) { int pos = 0, i = 0; int lcount = 1; for (i = 0; i < tupleNum - 1; i++) { if (i == tupleNum - 2) { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) { count[pos] = lcount; count[pos + 1] = 1; } else { count[pos] = lcount + 1; } } else { if (gpu_strcmp(key + i * keySize, key + (i + 1) * keySize, keySize) != 0) { count[pos] = lcount; lcount = 1; pos++; } else { lcount++; } } } } __device__ static inline void ComparatorInt(int &keyA, int &valA, int &keyB, int &valB, int dir) { int t; if ((keyA > keyB) == dir) { t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } __device__ static inline void ComparatorFloat(float &keyA, int &valA, float &keyB, int &valB, int dir) { float t1; int t2; if ((keyA > keyB) == dir) { t1 = keyA; keyA = keyB; keyB = t1; t2 = valA; valA = valB; valB = t2; } } __device__ static inline void Comparator(char *keyA, int &valA, char *keyB, int &valB, int keySize, int dir) { int t; char buf[32]; if ((gpu_strcmp(keyA, keyB, keySize) == 1) == dir) { memcpy(buf, keyA, keySize); memcpy(keyA, keyB, keySize); memcpy(keyB, buf, keySize); t = valA; valA = valB; valB = t; } } #define NTHREAD (SHARED_SIZE_LIMIT / 2) extern "C" __global__ void sort_key_string(char *key, int tupleNum, int keySize, char *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ char bufKey[SHARED_SIZE_LIMIT * 32]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; memcpy(bufKey + lid * keySize, key + gid * keySize, keySize); bufVal[lid] = gid; memcpy(bufKey + (lid + blockDim.x) * keySize, key + (gid + blockDim.x) * keySize, keySize); bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator(bufKey + pos * keySize, bufVal[pos + 0], bufKey + (pos + stride) * keySize, bufVal[pos + stride], keySize, ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator(bufKey + pos * keySize, bufVal[pos + 0], bufKey + (pos + stride) * keySize, bufVal[pos + stride], keySize, dir); } } __syncthreads(); memcpy(result + gid * keySize, bufKey + lid * keySize, keySize); ((int *)pos)[gid] = bufVal[lid]; memcpy(result + (gid + blockDim.x) * keySize, bufKey + (lid + blockDim.x) * keySize, keySize); ((int *)pos)[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Sorting small number of intergers. 
*/ extern "C" __global__ void sort_key_int(int *key, int tupleNum, int *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ int bufKey[SHARED_SIZE_LIMIT]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; bufKey[lid] = key[gid]; bufVal[lid] = gid; bufKey[lid + blockDim.x] = key[gid + blockDim.x]; bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorInt(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorInt(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], dir); } } __syncthreads(); result[gid] = bufKey[lid]; pos[gid] = bufVal[lid]; result[gid + blockDim.x] = bufKey[lid + blockDim.x]; pos[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Sorting small number of floats. */ extern "C" __global__ void sort_key_float(float *key, int tupleNum, float *result, int *pos, int dir) { int lid = threadIdx.x; int bid = blockIdx.x; __shared__ float bufKey[SHARED_SIZE_LIMIT]; __shared__ int bufVal[SHARED_SIZE_LIMIT]; int gid = bid * SHARED_SIZE_LIMIT + lid; bufKey[lid] = key[gid]; bufVal[lid] = gid; bufKey[lid + blockDim.x] = key[gid + blockDim.x]; bufVal[lid + blockDim.x] = gid + blockDim.x; __syncthreads(); for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1) { int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (int stride = size / 2; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorFloat(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], ddd); } } { for (int stride = blockDim.x; stride > 0; stride >>= 1) { __syncthreads(); int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); ComparatorFloat(bufKey[pos + 0], bufVal[pos + 0], bufKey[pos + stride], bufVal[pos + stride], dir); } } __syncthreads(); result[gid] = bufKey[lid]; pos[gid] = bufVal[lid]; result[gid + blockDim.x] = bufKey[lid + blockDim.x]; pos[gid + blockDim.x] = bufVal[lid + blockDim.x]; } /* * Naive sort. One thread per block. 
*/ extern "C" __global__ void sec_sort_key_int(int *key, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { int min = key[i]; int tmp = min; int pos = i; for (int j = i + 1; j < end; j++) { if (min > key[j]) { min = key[j]; pos = j; } } key[pos] = tmp; outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } extern "C" __global__ void sec_sort_key_float(float *key, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { float min = key[i]; float tmp = min; int pos = i; for (int j = i + 1; j < end; j++) { if (min > key[j]) { min = key[j]; pos = j; } } key[pos] = tmp; outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } extern "C" __global__ void sec_sort_key_string(char *key, int keySize, int *psum, int *count, int tupleNum, int *inputPos, int *outputPos) { int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid]; for (int i = start; i < end - 1; i++) { char min[128]; char tmp[128]; memcpy(min, key + i * keySize, keySize); memcpy(tmp, key + i * keySize, keySize); int pos = i; for (int j = i + 1; j < end; j++) { if (gpu_strcmp(min, key + j * keySize, keySize) > 0) { memcpy(min, key + j * keySize, keySize); pos = j; } } memcpy(key + pos * keySize, tmp, keySize); outputPos[i] = inputPos[pos]; inputPos[pos] = inputPos[i]; } outputPos[end - 1] = inputPos[end - 1]; } //__global__ static void sec_sort_key_int(int *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){ // int tid = blockIdx.x; // int start = psum[tid]; // int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // int min = key[i]; // int pos = i; // for(int j=i+1;j<end;j++){ // if(min > key[j]){ // min = key[j]; // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} // //__global__ static void sec_sort_key_float(float *key, int *psum, int *count ,int tupleNum, int *inputPos, int* //outputPos){ int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // float min = key[i]; // int pos = i; // for(int j=i+1;j<end;j++){ // if(min > key[j]){ // min = key[j]; // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} // //__global__ static void sec_sort_key_string(char *key, int keySize, int *psum, int *count ,int tupleNum, int *inputPos, //int* outputPos){ int tid = blockIdx.x; int start = psum[tid]; int end = start + count[tid] - 1; // // for(int i=start; i< end-1; i++){ // char min[128]; // memcpy(min,key + i*keySize, keySize); // int pos = i; // for(int j=i+1;j<end;j++){ // if(gpu_strcmp(min, key+j*keySize,keySize)>0){ // memcpy(min,key + j*keySize, keySize); // pos = j; // } // } // outputPos[i] = inputPos[pos]; // } //} extern "C" __global__ void set_key_string(char *key, int tupleNum) { int stride = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = CHAR_MAX; } extern "C" __global__ void set_key_int(int *key, int tupleNum) { int stride = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = INT_MAX; } extern "C" __global__ void set_key_float(float *key, int tupleNum) { int stride = blockDim.x * gridDim.x; 
int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < tupleNum; i += stride) key[i] = FLT_MAX; } /* * gather the elements from the @col into @result. */ extern "C" __global__ void gather_col_int(int *keyPos, int *col, int newNum, int tupleNum, int *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) result[i] = col[pos]; } } extern "C" __global__ void gather_col_float(int *keyPos, float *col, int newNum, int tupleNum, float *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) result[i] = col[pos]; } } extern "C" __global__ void gather_col_string(int *keyPos, char *col, int newNum, int tupleNum, int keySize, char *result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) memcpy(result + i * keySize, col + pos * keySize, keySize); } } /* generate the final result*/ extern "C" __global__ void gather_result(int *keyPos, char **col, int newNum, int tupleNum, int *size, int colNum, char **result) { int stride = blockDim.x * gridDim.x; int index = blockIdx.x * blockDim.x + threadIdx.x; for (int j = 0; j < colNum; j++) { for (int i = index; i < newNum; i += stride) { int pos = keyPos[i]; if (pos < tupleNum) memcpy(result[j] + i * size[j], col[j] + pos * size[j], size[j]); } } } /* * orderBy: sort the input data by the order by columns * * Prerequisite: * input data are not compressed * * Input: * odNode: the groupby node which contains the input data and groupby information * pp: records the statistics such as kernel execution time * * Return: * a new table node */ struct tableNode *orderBy(struct orderByNode *odNode, struct statistic *pp) { extern char *col_buf; struct timeval t; struct tableNode *res = NULL; struct timespec start, end; clock_gettime(CLOCK_REALTIME, &start); assert(odNode->table->tupleNum < SHARED_SIZE_LIMIT); res = (struct tableNode *)malloc(sizeof(struct tableNode)); CHECK_POINTER(res); res->tupleNum = odNode->table->tupleNum; res->totalAttr = odNode->table->totalAttr; res->tupleSize = odNode->table->tupleSize; res->attrType = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrType); res->attrSize = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrSize); res->attrTotalSize = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->attrTotalSize); res->dataPos = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->dataPos); res->dataFormat = (int *)malloc(sizeof(int) * res->totalAttr); CHECK_POINTER(res->dataFormat); res->content = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(res->content); int gpuTupleNum = odNode->table->tupleNum; char *gpuKey, **column, **gpuContent; char *gpuSortedKey; int *gpuSize, *gpuPos; column = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(column); #ifdef HAS_GMM CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * res->totalAttr, FLAG_PTARRAY)); #else CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuContent, sizeof(char *) * res->totalAttr)); #endif for (int i = 0; i < res->totalAttr; i++) { res->attrType[i] = odNode->table->attrType[i]; res->attrSize[i] = odNode->table->attrSize[i]; res->attrTotalSize[i] = 
odNode->table->attrTotalSize[i]; res->dataPos[i] = MEM; res->dataFormat[i] = UNCOMPRESSED; res->content[i] = (char *)malloc(res->attrSize[i] * res->tupleNum); CHECK_POINTER(res->content[i]); int attrSize = res->attrSize[i]; if (odNode->table->dataPos[i] == MEM) { CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&column[i], attrSize * res->tupleNum)); gettimeofday(&t, NULL); // printf("[gvm] %lf intercepting diskIO\n", t.tv_sec + t.tv_usec / 1000000.0); memcpy(col_buf, odNode->table->content[i], attrSize * res->tupleNum); gettimeofday(&t, NULL); // printf("[gvm] %lf intercepted diskIO\n", t.tv_sec + t.tv_usec / 1000000.0); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(column[i], col_buf, attrSize * res->tupleNum, cudaMemcpyHostToDevice)); } else if (odNode->table->dataPos[i] == GPU) { column[i] = odNode->table->content[i]; } CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &column[i], sizeof(char *), cudaMemcpyHostToDevice)); } int newNum = 1; while (newNum < gpuTupleNum) { newNum *= 2; } CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuPos, sizeof(int) * newNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSize, sizeof(int) * res->totalAttr)); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuSize, res->attrSize, sizeof(int) * res->totalAttr, cudaMemcpyHostToDevice);); char **gpuResult; char **result; result = (char **)malloc(sizeof(char *) * res->totalAttr); CHECK_POINTER(result); #ifdef HAS_GMM CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuResult, sizeof(char *) * res->totalAttr, FLAG_PTARRAY)); #else CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuResult, sizeof(char *) * res->totalAttr)); #endif for (int i = 0; i < res->totalAttr; i++) { CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&result[i], res->attrSize[i] * gpuTupleNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuResult[i], &result[i], sizeof(char *), cudaMemcpyHostToDevice)); } /* Sort by the first orderby column first */ int dir; if (odNode->orderBySeq[0] == ASC) dir = 1; else dir = 0; int index = odNode->orderByIndex[0]; int type = odNode->table->attrType[index]; printf("type: %s\n", type == INT ? "int" : (type == FLOAT ? 
"float" : "string")); if (type == INT) { CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, sizeof(int) * newNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, sizeof(int) * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(119)); set_key_int<<<8, 128>>>((int *)gpuKey, newNum); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], sizeof(int) * gpuTupleNum, cudaMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(123)); sort_key_int<<<1, newNum / 2>>>((int *)gpuKey, newNum, (int *)gpuSortedKey, gpuPos, dir); } else if (type == FLOAT) { CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, sizeof(float) * newNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, sizeof(float) * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(118)); set_key_float<<<8, 128>>>((float *)gpuKey, newNum); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], sizeof(int) * gpuTupleNum, cudaMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(122)); sort_key_float<<<1, newNum / 2>>>((float *)gpuKey, newNum, (float *)gpuSortedKey, gpuPos, dir); } else if (type == STRING) { int keySize = odNode->table->attrSize[index]; CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, keySize * newNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, keySize * newNum)); GMM_CALL(cudaAdvise(0, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(120)); set_key_string<<<8, 128>>>(gpuKey, newNum * keySize); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], keySize * gpuTupleNum, cudaMemcpyDeviceToDevice)); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(124)); sort_key_string<<<1, newNum / 2>>>(gpuKey, newNum, keySize, gpuSortedKey, gpuPos, dir); } /* Currently we only support no more than 2 orderBy columns */ if (odNode->orderByNum == 2) { int keySize = odNode->table->attrSize[index]; int secIndex = odNode->orderByIndex[1]; int keySize2 = odNode->table->attrSize[secIndex]; int secType = odNode->table->attrType[secIndex]; int *keyNum, *keyCount, *keyPsum; CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&keyNum, sizeof(int))); if (type == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(21)); count_unique_keys_int<<<1, 1>>>((int *)gpuSortedKey, gpuTupleNum, keyNum); } else if (type == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(20)); count_unique_keys_float<<<1, 1>>>((float *)gpuSortedKey, gpuTupleNum, keyNum); } else if (type == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(22)); count_unique_keys_string<<<1, 1>>>(gpuSortedKey, gpuTupleNum, keySize, keyNum); } int cpuKeyNum; CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&cpuKeyNum, keyNum, sizeof(int), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&keyCount, sizeof(int) * cpuKeyNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&keyPsum, sizeof(int) * cpuKeyNum)); CUDA_SAFE_CALL_NO_SYNC(cudaMemset(keyPsum, 0, sizeof(int) * cpuKeyNum)); if (type == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(17)); count_key_num_int<<<1, 1>>>((int *)gpuSortedKey, 
gpuTupleNum, keyCount); } else if (type == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(16)); count_key_num_float<<<1, 1>>>((float *)gpuSortedKey, gpuTupleNum, keyCount); } else if (type == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(18)); count_key_num_string<<<1, 1>>>(gpuSortedKey, gpuTupleNum, keySize, keyCount); } scanImpl(keyCount, cpuKeyNum, keyPsum, pp); int *gpuPos2; CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuPos2, sizeof(int) * newNum)); char *gpuKey2; CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey2, keySize2 * newNum)); printf("sectype: %s\n", secType == INT ? "int" : (secType == FLOAT ? "float" : "string")); if (secType == INT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(27)); gather_col_int<<<8, 128>>>(gpuPos, (int *)column[secIndex], newNum, gpuTupleNum, (int *)gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(116)); sec_sort_key_int<<<cpuKeyNum, 1>>>((int *)gpuKey2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } else if (secType == FLOAT) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(26)); gather_col_float<<<8, 128>>>(gpuPos, (float *)column[secIndex], newNum, gpuTupleNum, (float *)gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(115)); sec_sort_key_float<<<cpuKeyNum, 1>>>((float *)gpuKey2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } else if (secType == STRING) { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(28)); gather_col_string<<<8, 128>>>(gpuPos, column[secIndex], newNum, gpuTupleNum, keySize2, gpuKey2); GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(2, CADV_INPUT)); GMM_CALL(cudaAdvise(3, CADV_INPUT)); GMM_CALL(cudaAdvise(5, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_OUTPUT)); GMM_CALL(cudaSetFunction(117)); sec_sort_key_string<<<cpuKeyNum, 1>>>(gpuKey2, keySize2, keyPsum, keyCount, gpuTupleNum, gpuPos, gpuPos2); } GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT | CADV_PTAINPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_INPUT | CADV_PTAOUTPUT)); GMM_CALL(cudaSetFunction(29)); gather_result<<<8, 128>>>(gpuPos2, gpuContent, newNum, gpuTupleNum, gpuSize, res->totalAttr, gpuResult); CUDA_SAFE_CALL_NO_SYNC(cudaFree(keyCount)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(keyNum)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuPos2)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuKey2)); } else { GMM_CALL(cudaAdvise(0, CADV_INPUT)); GMM_CALL(cudaAdvise(1, CADV_INPUT | CADV_PTAINPUT)); GMM_CALL(cudaAdvise(4, CADV_INPUT)); GMM_CALL(cudaAdvise(6, CADV_INPUT | CADV_PTAOUTPUT)); GMM_CALL(cudaSetFunction(29)); gather_result<<<8, 128>>>(gpuPos, gpuContent, newNum, gpuTupleNum, gpuSize, res->totalAttr, gpuResult); } for (int i = 0; i < res->totalAttr; i++) { int size = res->attrSize[i] * gpuTupleNum; memset(res->content[i], 0, size); CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[i], result[i], size, 
cudaMemcpyDeviceToHost)); } for (int i = 0; i < res->totalAttr; i++) { if (odNode->table->dataPos[i] == MEM) CUDA_SAFE_CALL_NO_SYNC(cudaFree(column[i])); CUDA_SAFE_CALL_NO_SYNC(cudaFree(result[i])); } free(column); free(result); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuKey)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuSortedKey)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuContent)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuSize)); CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuPos)); clock_gettime(CLOCK_REALTIME, &end); double timeE = (end.tv_sec - start.tv_sec) * BILLION + end.tv_nsec - start.tv_nsec; printf("OrderBy Time: %lf\n", timeE / (1000 * 1000)); return res; }
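In the two-column path above, count_key_num_* emits one group size per distinct primary key of the already-sorted data, and scanImpl turns those sizes into exclusive prefix sums so that each sec_sort_key_* block can sort one contiguous slice by the secondary key. A small host-side sketch of that bookkeeping, using made-up values:

#include <stdio.h>

int main() {
    // Primary keys after the first sort, plus the group sizes count_key_num_* would emit.
    int sortedKey[8] = {1, 1, 2, 2, 2, 5, 7, 7};
    int keyCount[4]  = {2, 3, 1, 2};          // one entry per distinct key
    int keyPsum[4];                           // exclusive prefix sum = slice start offsets
    int acc = 0;
    for (int g = 0; g < 4; g++) { keyPsum[g] = acc; acc += keyCount[g]; }
    // Block g of sec_sort_key_* sorts rows [keyPsum[g], keyPsum[g] + keyCount[g]) by the second column.
    for (int g = 0; g < 4; g++)
        printf("key %d -> rows %d..%d\n", sortedKey[keyPsum[g]], keyPsum[g], keyPsum[g] + keyCount[g] - 1);
    return 0;
}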
80142320634420447af30e6ed0ec6d9643f4f694.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : binconnected_components.cu Author : Diptanshu, Gaurav Version : Copyright : (c) 2018 Description : CUDA compute reciprocals ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define SWAP(a, b) {int swp=a; a=b; b=swp;} #define MAX_HEIGHT 10 int numIterations; /* * */ __global__ void bfs(int *adjList, int *offset, int *inpFrontier, int *outFrontier, int *parent, int *visited, int *treeEdges, int *s1, int *s2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < *s1 && visited[inpFrontier[tid]] == 0) { int v = inpFrontier[tid]; // Current vertex // Put all the unvisited neighbors into outFrontier for (int i = offset[v]; i < offset[v + 1]; i++) { if (!visited[adjList[i]] && atomicCAS(&parent[adjList[i]], -1, v) == -1) { int old = atomicAdd(s2, 1); outFrontier[old] = adjList[i]; treeEdges[i] = -1; } else if (adjList[i] == parent[v]) { treeEdges[i] = -2; // Place the parent as the first element in adjList if (i != offset[v]) { SWAP(adjList[offset[v]], adjList[i]); SWAP(treeEdges[offset[v]], treeEdges[i]); } } else if (v < adjList[i]) { // Non tree edge, mark only in one direction such that a < b for any non-tree edge a->b. treeEdges[i] = v; } else { treeEdges[i] = -2; } } visited[v] = 1; } } /* * * */ __global__ void lca(int *adjList, int *offset, int *parent, int *nonTreeEdges, int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList, int vertexCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2; int a = nonTreeEdges[3 * tid]; int b = nonTreeEdges[3 * tid + 1]; int eid = nonTreeEdges[3 * tid + 2]; int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT]; while (a != 0) { path_a[i++] = a; a = parent[a]; } path_a[i++] = 0; len1 = i; i = 0; while (b != 0) { path_b[i++] = b; b = parent[b]; } path_b[i++] = 0; len2 = i; i = 0; while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1]) i++; int lcaVertex = path_a[len1 - i]; //printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex); len1 -= i; len2 -= i; lcaThread[tid] = lcaVertex; // Mark the non-tree edge visited threadEdge[eid] = tid; // Mark the rest of the edges visited and the vertices as part of unfinished traversal for (i = 0; i < len1; i++) { threadEdge[offset[path_a[i]]] = tid; if (i != len1 - 1) unfinished[path_a[i]] = 1; } for (i = 0; i < len2; i++) { threadEdge[offset[path_b[i]]] = tid; if (i != len2 - 1) unfinished[path_b[i]] = 1; } __syncthreads(); // Create auxiliary vertex // Special case for root vertex // As root vertex doesn't have any parent, we don't set its parent. 
if (lcaVertex != 0) auxAdjList[2 * lcaVertex] = adjList[offset[lcaVertex]]; auxAdjList[2 * lcaVertex + 1] = lcaVertex; } __global__ void lca1(int *adjList, int *offset, int *parent, int *nonTreeEdges, int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList, int vertexCount, int edgeCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2; int a = nonTreeEdges[3 * tid]; int b = nonTreeEdges[3 * tid + 1]; if (auxAdjList[2 * a + 1] != -1) a += vertexCount; if (auxAdjList[2 * b + 1] != -1) b += vertexCount; int eid = nonTreeEdges[3 * tid + 2]; int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT]; while (a != 0) { path_a[i++] = a; if (a < vertexCount && auxAdjList[2 * a + 1] != -1) a = vertexCount + a; else if (a >= vertexCount) a = parent[a - vertexCount]; else a = parent[a]; } path_a[i++] = 0; len1 = i; i = 0; while (b != 0) { path_b[i++] = b; if (b < vertexCount && auxAdjList[2 * b + 1] != -1) b = vertexCount + b; else if (b >= vertexCount) b = parent[b - vertexCount]; else b = parent[b]; } path_b[i++] = 0; len2 = i; i = 0; while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1]) i++; //int lcaVertex = path_a[len1 - i]; //printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex); len1 -= i; len2 -= i; // Mark the non-tree edge visited threadEdge[eid] = tid; for (i = 0; i < len1; i++) { if (path_a[i] >= vertexCount) { threadEdge[edgeCount + path_a[i] - vertexCount] = tid; } else { threadEdge[offset[path_a[i]]] = tid; } } for (i = 0; i < len2; i++) { if (path_b[i] >= vertexCount) { threadEdge[edgeCount + path_b[i] - vertexCount] = tid; } else { threadEdge[offset[path_b[i]]] = tid; } } } __global__ void auxGraph(int *adjList, int *offset, int *lcaThread, int vertexCount, int *rootLCACount, int *auxAdjList) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = lcaThread[tid]; if (lcaVertex != 0) adjList[offset[lcaVertex]] = vertexCount + lcaVertex; else atomicAdd(rootLCACount, 1); // Update grandParent's child int grandParent = auxAdjList[2 * lcaVertex]; for (int i = offset[grandParent]; i < offset[grandParent + 1]; i++) { if (adjList[i] == lcaVertex) { adjList[i] = vertexCount + lcaVertex; break; } } } __global__ void markArtPoint(int *adjList, int *offset, int *lcaThread, int *artPoint, int *unfinished, int *rootLCACount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = lcaThread[tid]; bool bridge = false; for (int i = offset[lcaVertex]; i < offset[lcaVertex + 1]; i++) { if (!unfinished[adjList[i]]) { bridge = true; break; } } printf("vertex %d rootLCACOUnt %d bridge %d\n", lcaVertex, *rootLCACount, bridge); if (lcaVertex != 0 && bridge) artPoint[lcaVertex] = 1; else if (lcaVertex == 0 && bridge && *rootLCACount > 1) artPoint[0] = 1; } /* * Finds BCC Id for each edge. If an edge was part of the path to an LCA and * that LCA happens to be an articulation point, we assign the LCA's vertex ID as BCC id to the edge. * Otherwise, we traverse up the tree to find an LCA which is an articulation point. 
*/ __global__ void findBCC(int *adjList, int *offset, int *threadEdge, int *lcaThread, int *artPoint, int *bccId) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = threadEdge[tid]; // TODO: Unfinished implementation // Note: For each undirected edge b/w a-b, only one direction is marked in the threadEdge if (lcaVertex != -1) { while (!artPoint[lcaVertex]) { //lcaVertex = adjList[offset[lcaVertex]]; } bccId[tid] = lcaVertex; } } int main(int argc, char **argv) { char* edgeListFile = argv[1]; FILE *fp; fp = fopen(edgeListFile, "r"); if (fp == NULL) { printf("ERROR: File does not exist!\n"); return 1; } int vertexCount, edgeCount; fscanf(fp, "%d", &vertexCount); fscanf(fp, "%d", &edgeCount); printf("VertexCount %d\n", vertexCount); printf("EdgeCount %d\n", edgeCount); // Data structure to represent the graph in CSR format int *adjList; // Concatenated adjacency list int *offset; // Stores offset of each vertex's adjacency list size_t adjListSize = edgeCount * sizeof(int); size_t offsetSize = (vertexCount + 1) * sizeof(int); size_t verticesSize = vertexCount * sizeof(int); adjList = (int *)malloc(adjListSize); offset = (int *)malloc(offsetSize); int edgeCounter = 0, vertexCounter = 0; int prevSource, source, dest; fscanf(fp, "%d %d", &prevSource, &dest); // Convert the graph to CSR format while (edgeCounter != edgeCount) { while (vertexCounter <= prevSource) // Includes the vertices with no edges offset[vertexCounter++] = edgeCounter; adjList[edgeCounter++] = dest; while (fscanf(fp, "%d %d", &source, &dest) == 2 && source == prevSource) adjList[edgeCounter++] = dest; prevSource = source; } // Mark the sentinel values so that the degree of any vertex i = offset[i + 1] - offset[i] while (vertexCounter <= vertexCount) offset[vertexCounter++] = edgeCount; // printf("Adjacency List\n"); // for(int i = 0; i < edgeCount; i++) { // printf("%d ", adjList[i]); // } // // printf("\nOffsets\n"); // for(int i = 0; i < vertexCount + 1; i++) { // printf("%d ", offset[i]); // } // printf("\n"); // Initialize other data structure to be used for bfs int *inpFrontier, *outFrontier, *visited, *parent, *treeEdges; int s1, s2; // Size of input and output frontiers int treeEdgeCount = 0; inpFrontier = (int *)calloc(vertexCount, sizeof(int)); outFrontier = (int *)calloc(vertexCount, sizeof(int)); visited = (int *)calloc(vertexCount, sizeof(int)); treeEdges = (int *)calloc(edgeCount, sizeof(int)); parent = (int *)malloc(verticesSize); memset(parent, -1, verticesSize); s1 = 1; s2 = 0; inpFrontier[0] = 0; // Inserting source vertex // Corresponding device data int *d_adjList, *d_offset; int *d_inpFrontier, *d_outFrontier, *d_visited, *d_parent, *d_treeEdges; int *d_s1, *d_s2; hipMalloc(&d_adjList, adjListSize); hipMalloc(&d_offset, offsetSize); hipMalloc(&d_inpFrontier, verticesSize); hipMalloc(&d_outFrontier, verticesSize); hipMalloc(&d_visited, verticesSize); hipMalloc(&d_treeEdges, edgeCount * sizeof(int)); hipMalloc(&d_parent, verticesSize); hipMalloc(&d_s1, sizeof(int)); hipMalloc(&d_s2, sizeof(int)); hipMemcpy(d_adjList, adjList, adjListSize, hipMemcpyHostToDevice); hipMemcpy(d_offset, offset, offsetSize, hipMemcpyHostToDevice); hipMemcpy(d_inpFrontier, inpFrontier, verticesSize, hipMemcpyHostToDevice); hipMemcpy(d_outFrontier, outFrontier, verticesSize, hipMemcpyHostToDevice); hipMemcpy(d_visited, visited, verticesSize, hipMemcpyHostToDevice); hipMemcpy(d_treeEdges, treeEdges, edgeCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_parent, parent, verticesSize, hipMemcpyHostToDevice); 
hipMemcpy(d_s1, &s1, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_s2, &s2, sizeof(int), hipMemcpyHostToDevice); // Start the bfs bool odd = true; int inpQSize = s1; numIterations = 0; while (inpQSize != 0) { dim3 blocksPerGrid ((inpQSize + 1023) / 1024); dim3 threadsPerBlock ((inpQSize > 1024) ? 1024 : inpQSize); if (odd) { hipLaunchKernelGGL(( bfs), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_adjList, d_offset, d_inpFrontier, d_outFrontier, d_parent, d_visited, d_treeEdges, d_s1, d_s2); hipMemcpy(&inpQSize, d_s2, sizeof(int), hipMemcpyDeviceToHost); s1 = 0; hipMemcpy(d_s1, &s1, sizeof(int), hipMemcpyHostToDevice); } else { hipLaunchKernelGGL(( bfs), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_adjList, d_offset, d_outFrontier, d_inpFrontier, d_parent, d_visited, d_treeEdges, d_s2, d_s1); hipMemcpy(&inpQSize, d_s1, sizeof(int), hipMemcpyDeviceToHost); s2 = 0; hipMemcpy(d_s2, &s2, sizeof(int), hipMemcpyHostToDevice); } odd = !odd; numIterations++; treeEdgeCount += inpQSize; } hipMemcpy(visited, d_visited, verticesSize, hipMemcpyDeviceToHost); hipMemcpy(parent, d_parent, verticesSize, hipMemcpyDeviceToHost); hipMemcpy(treeEdges, d_treeEdges, edgeCount * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(adjList, d_adjList, edgeCount * sizeof(int), hipMemcpyDeviceToHost); // printf("Parent array\n"); // for (int i = 0; i < vertexCount; i++) // printf("%d ", parent[i]); // printf("\n"); // // printf("Adjacency List\n"); // for(int i = 0; i < edgeCount; i++) { // printf("(%d %d) ", i, adjList[i]); // } // printf("\n"); // // for (int i = 0; i < vertexCount; i++) { // if (parent[i] != adjList[offset[i]]) // printf("WRONG %d\n", i); // } // // printf("Number of iterations %d \n", numIterations); // printf("Visited array\n"); // for (int i = 0; i < vertexCount; i++) // printf("%d ", visited[i]); // printf("\n"); // printf("Tree Edges\n"); // for (int i = 0; i < edgeCount; i++) // printf("%d ", treeEdges[i]); // printf("\n"); int nonTreeEdgeCount = (edgeCount - 2 * treeEdgeCount) / 2; // printf("treeEdgecount %d\n", treeEdgeCount); // printf("Non-tree edges count %d\n", nonTreeEdgeCount); dim3 blocksPerGrid ((nonTreeEdgeCount + 1023) / 1024); dim3 threadsPerBlock ((nonTreeEdgeCount > 1024) ? 
1024 : nonTreeEdgeCount); int threadCount = blocksPerGrid.x * threadsPerBlock.x; //printf("ThreadCount = %d\n", threadCount); // Data structure to represent non tree edges // a b i : edge a->b with edge id i int *nonTreeEdges = (int *) calloc(3 * nonTreeEdgeCount, sizeof(int)); int *lcaThread = (int *) calloc(threadCount, sizeof(int)); int *threadEdge = (int *) malloc(edgeCount * sizeof(int)); memset(threadEdge, -1, edgeCount * sizeof(int)); int *unfinished = (int *) calloc(vertexCount, sizeof(int)); int *auxAdjList = (int *) malloc(2 * vertexCount * sizeof(int)); memset(auxAdjList, -1, 2 * vertexCount * sizeof(int)); int *artPoint = (int *) calloc(vertexCount, sizeof(int)); int rootLCACount = 0; // Populate non tree edges for (int i = 0, j = 0; i < edgeCount; i++) { if (treeEdges[i] >= 0) { nonTreeEdges[j++] = treeEdges[i]; nonTreeEdges[j++] = adjList[i]; nonTreeEdges[j++] = i; } } /* printf("Non tree edges\n"); for (int i = 0; i < 3 * nonTreeEdgeCount; i+=3) { printf("%d %d %d\n", nonTreeEdges[i], nonTreeEdges[i + 1], nonTreeEdges[i + 2]); }*/ int *d_nonTreeEdges, *d_lcaThread, *d_threadEdge, *d_unfinished, *d_auxAdjList, *d_artPoint, *d_rootLCACount; hipMalloc(&d_nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int)); hipMalloc(&d_lcaThread, threadCount * sizeof(int)); hipMalloc(&d_threadEdge, edgeCount * sizeof(int)); hipMalloc(&d_unfinished, vertexCount * sizeof(int)); hipMalloc(&d_auxAdjList, 2 * vertexCount * sizeof(int)); hipMalloc(&d_artPoint, vertexCount * sizeof(int)); hipMalloc(&d_rootLCACount, sizeof(int)); hipMemcpy(d_nonTreeEdges, nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_lcaThread, lcaThread, threadCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_threadEdge, threadEdge, edgeCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_unfinished, unfinished, vertexCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_auxAdjList, auxAdjList, 2 * vertexCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_artPoint, artPoint, vertexCount * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_rootLCACount, &rootLCACount, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( lca), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_adjList, d_offset, d_parent, d_nonTreeEdges, d_unfinished, d_threadEdge, d_lcaThread, d_auxAdjList, vertexCount); hipLaunchKernelGGL(( auxGraph), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_adjList, d_offset, d_lcaThread, vertexCount, d_rootLCACount, d_auxAdjList); int *threadEdge1 = (int *) malloc((edgeCount + vertexCount) * sizeof(int)); memset(threadEdge1, -1, (edgeCount + vertexCount) * sizeof(int)); int *d_threadEdge1; hipMalloc(&d_threadEdge1, (edgeCount + vertexCount) * sizeof(int)); hipMemcpy(d_threadEdge1, threadEdge1, (edgeCount + vertexCount) * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( lca1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_adjList, d_offset, d_parent, d_nonTreeEdges, d_unfinished, d_threadEdge1, d_lcaThread, d_auxAdjList, vertexCount, edgeCount); hipMemcpy(threadEdge1, d_threadEdge1, (edgeCount + vertexCount) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(lcaThread, d_lcaThread, threadCount * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(threadEdge, d_threadEdge, edgeCount * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(unfinished, d_unfinished, vertexCount * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(adjList, d_adjList, edgeCount * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(auxAdjList, d_auxAdjList, 2 * vertexCount * sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(artPoint, d_artPoint, vertexCount * sizeof(int), hipMemcpyDeviceToHost); /* printf("LCA Thread\n"); for (int i = 0; i < threadCount; i++) printf("%d ", lcaThread[i]); printf("\nthread Edge\n"); for (int i = 0; i < edgeCount; i++) printf("%d ", threadEdge[i]); printf("\n unfinished \n"); for (int i = 0; i < vertexCount; i++) printf("%d ", unfinished[i]); printf("\n Adj List\n"); for (int i = 0; i < edgeCount; i++) printf("%d ", adjList[i]); printf("\n Aux Adj List \n"); for (int i = 0; i < 2 * vertexCount; i+=2) printf("%d %d\n", auxAdjList[i], auxAdjList[i + 1]); printf("\n Art Point \n"); for (int i = 0; i < vertexCount; i++) printf("%d %d\n", i, artPoint[i]); printf("\n THREAD EDGE\n"); for (int i = 0; i < (edgeCount + vertexCount); i++) printf("%d ", threadEdge1[i]); */ printf("\n"); for (int i = 0; i < threadCount; i++) { if (threadEdge1[offset[lcaThread[i]]] == -1) printf("%d ", lcaThread[i]); } printf("\n"); // Free allocated memory on device and host hipFree(d_adjList); hipFree(d_offset); hipFree(d_inpFrontier); hipFree(d_outFrontier); hipFree(d_visited); hipFree(d_treeEdges); hipFree(d_parent); hipFree(d_s1); hipFree(d_s2); hipFree(d_nonTreeEdges); hipFree(d_lcaThread); hipFree(d_threadEdge); hipFree(d_unfinished); hipFree(d_artPoint); hipFree(d_rootLCACount); free(inpFrontier); free(outFrontier); free(visited); free(treeEdges); free(parent); free(nonTreeEdges); free(lcaThread); free(threadEdge); free(unfinished); free(artPoint); return 0; }
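main() above packs a source-sorted edge list into CSR form: adjList concatenates the neighbor lists and offset gets sentinel entries so that degree(i) = offset[i + 1] - offset[i]. A compact host-side sketch of the same conversion on a toy graph; the arrays and sizes here are illustrative only:

#include <stdio.h>

int main() {
    // Directed edge list sorted by source vertex (both directions present for an undirected graph).
    const int vertexCount = 4, edgeCount = 6;
    int src[edgeCount] = {0, 0, 1, 2, 2, 3};
    int dst[edgeCount] = {1, 2, 0, 0, 3, 2};

    int adjList[edgeCount], offset[vertexCount + 1];
    int e = 0, v = 0;
    while (e < edgeCount) {
        while (v <= src[e]) offset[v++] = e;   // also covers vertices with no outgoing edges
        adjList[e] = dst[e];
        e++;
    }
    while (v <= vertexCount) offset[v++] = edgeCount;  // sentinel entries

    for (int i = 0; i < vertexCount; i++)
        printf("vertex %d: degree %d\n", i, offset[i + 1] - offset[i]);
    return 0;
}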
80142320634420447af30e6ed0ec6d9643f4f694.cu
/* ============================================================================ Name : binconnected_components.cu Author : Diptanshu, Gaurav Version : Copyright : (c) 2018 Description : CUDA compute reciprocals ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define SWAP(a, b) {int swp=a; a=b; b=swp;} #define MAX_HEIGHT 10 int numIterations; /* * */ __global__ void bfs(int *adjList, int *offset, int *inpFrontier, int *outFrontier, int *parent, int *visited, int *treeEdges, int *s1, int *s2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < *s1 && visited[inpFrontier[tid]] == 0) { int v = inpFrontier[tid]; // Current vertex // Put all the unvisited neighbors into outFrontier for (int i = offset[v]; i < offset[v + 1]; i++) { if (!visited[adjList[i]] && atomicCAS(&parent[adjList[i]], -1, v) == -1) { int old = atomicAdd(s2, 1); outFrontier[old] = adjList[i]; treeEdges[i] = -1; } else if (adjList[i] == parent[v]) { treeEdges[i] = -2; // Place the parent as the first element in adjList if (i != offset[v]) { SWAP(adjList[offset[v]], adjList[i]); SWAP(treeEdges[offset[v]], treeEdges[i]); } } else if (v < adjList[i]) { // Non tree edge, mark only in one direction such that a < b for any non-tree edge a->b. treeEdges[i] = v; } else { treeEdges[i] = -2; } } visited[v] = 1; } } /* * * */ __global__ void lca(int *adjList, int *offset, int *parent, int *nonTreeEdges, int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList, int vertexCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2; int a = nonTreeEdges[3 * tid]; int b = nonTreeEdges[3 * tid + 1]; int eid = nonTreeEdges[3 * tid + 2]; int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT]; while (a != 0) { path_a[i++] = a; a = parent[a]; } path_a[i++] = 0; len1 = i; i = 0; while (b != 0) { path_b[i++] = b; b = parent[b]; } path_b[i++] = 0; len2 = i; i = 0; while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1]) i++; int lcaVertex = path_a[len1 - i]; //printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex); len1 -= i; len2 -= i; lcaThread[tid] = lcaVertex; // Mark the non-tree edge visited threadEdge[eid] = tid; // Mark the rest of the edges visited and the vertices as part of unfinished traversal for (i = 0; i < len1; i++) { threadEdge[offset[path_a[i]]] = tid; if (i != len1 - 1) unfinished[path_a[i]] = 1; } for (i = 0; i < len2; i++) { threadEdge[offset[path_b[i]]] = tid; if (i != len2 - 1) unfinished[path_b[i]] = 1; } __syncthreads(); // Create auxiliary vertex // Special case for root vertex // As root vertex doesn't have any parent, we don't set its parent. 
if (lcaVertex != 0) auxAdjList[2 * lcaVertex] = adjList[offset[lcaVertex]]; auxAdjList[2 * lcaVertex + 1] = lcaVertex; } __global__ void lca1(int *adjList, int *offset, int *parent, int *nonTreeEdges, int *unfinished, int *threadEdge, int *lcaThread, int *auxAdjList, int vertexCount, int edgeCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x, i = 0, len1, len2; int a = nonTreeEdges[3 * tid]; int b = nonTreeEdges[3 * tid + 1]; if (auxAdjList[2 * a + 1] != -1) a += vertexCount; if (auxAdjList[2 * b + 1] != -1) b += vertexCount; int eid = nonTreeEdges[3 * tid + 2]; int path_a[MAX_HEIGHT], path_b[MAX_HEIGHT]; while (a != 0) { path_a[i++] = a; if (a < vertexCount && auxAdjList[2 * a + 1] != -1) a = vertexCount + a; else if (a >= vertexCount) a = parent[a - vertexCount]; else a = parent[a]; } path_a[i++] = 0; len1 = i; i = 0; while (b != 0) { path_b[i++] = b; if (b < vertexCount && auxAdjList[2 * b + 1] != -1) b = vertexCount + b; else if (b >= vertexCount) b = parent[b - vertexCount]; else b = parent[b]; } path_b[i++] = 0; len2 = i; i = 0; while (i < len1 && i < len2 && path_a[len1 - i - 1] == path_b[len2 - i - 1]) i++; //int lcaVertex = path_a[len1 - i]; //printf("Edge %d: %d %d LCA %d\n", eid, nonTreeEdges[3 * tid], nonTreeEdges[3 * tid + 1], lcaVertex); len1 -= i; len2 -= i; // Mark the non-tree edge visited threadEdge[eid] = tid; for (i = 0; i < len1; i++) { if (path_a[i] >= vertexCount) { threadEdge[edgeCount + path_a[i] - vertexCount] = tid; } else { threadEdge[offset[path_a[i]]] = tid; } } for (i = 0; i < len2; i++) { if (path_b[i] >= vertexCount) { threadEdge[edgeCount + path_b[i] - vertexCount] = tid; } else { threadEdge[offset[path_b[i]]] = tid; } } } __global__ void auxGraph(int *adjList, int *offset, int *lcaThread, int vertexCount, int *rootLCACount, int *auxAdjList) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = lcaThread[tid]; if (lcaVertex != 0) adjList[offset[lcaVertex]] = vertexCount + lcaVertex; else atomicAdd(rootLCACount, 1); // Update grandParent's child int grandParent = auxAdjList[2 * lcaVertex]; for (int i = offset[grandParent]; i < offset[grandParent + 1]; i++) { if (adjList[i] == lcaVertex) { adjList[i] = vertexCount + lcaVertex; break; } } } __global__ void markArtPoint(int *adjList, int *offset, int *lcaThread, int *artPoint, int *unfinished, int *rootLCACount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = lcaThread[tid]; bool bridge = false; for (int i = offset[lcaVertex]; i < offset[lcaVertex + 1]; i++) { if (!unfinished[adjList[i]]) { bridge = true; break; } } printf("vertex %d rootLCACOUnt %d bridge %d\n", lcaVertex, *rootLCACount, bridge); if (lcaVertex != 0 && bridge) artPoint[lcaVertex] = 1; else if (lcaVertex == 0 && bridge && *rootLCACount > 1) artPoint[0] = 1; } /* * Finds BCC Id for each edge. If an edge was part of the path to an LCA and * that LCA happens to be an articulation point, we assign the LCA's vertex ID as BCC id to the edge. * Otherwise, we traverse up the tree to find an LCA which is an articulation point. 
*/ __global__ void findBCC(int *adjList, int *offset, int *threadEdge, int *lcaThread, int *artPoint, int *bccId) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int lcaVertex = threadEdge[tid]; // TODO: Unfinished implementation // Note: For each undirected edge b/w a-b, only one direction is marked in the threadEdge if (lcaVertex != -1) { while (!artPoint[lcaVertex]) { //lcaVertex = adjList[offset[lcaVertex]]; } bccId[tid] = lcaVertex; } } int main(int argc, char **argv) { char* edgeListFile = argv[1]; FILE *fp; fp = fopen(edgeListFile, "r"); if (fp == NULL) { printf("ERROR: File does not exist!\n"); return 1; } int vertexCount, edgeCount; fscanf(fp, "%d", &vertexCount); fscanf(fp, "%d", &edgeCount); printf("VertexCount %d\n", vertexCount); printf("EdgeCount %d\n", edgeCount); // Data structure to represent the graph in CSR format int *adjList; // Concatenated adjacency list int *offset; // Stores offset of each vertex's adjacency list size_t adjListSize = edgeCount * sizeof(int); size_t offsetSize = (vertexCount + 1) * sizeof(int); size_t verticesSize = vertexCount * sizeof(int); adjList = (int *)malloc(adjListSize); offset = (int *)malloc(offsetSize); int edgeCounter = 0, vertexCounter = 0; int prevSource, source, dest; fscanf(fp, "%d %d", &prevSource, &dest); // Convert the graph to CSR format while (edgeCounter != edgeCount) { while (vertexCounter <= prevSource) // Includes the vertices with no edges offset[vertexCounter++] = edgeCounter; adjList[edgeCounter++] = dest; while (fscanf(fp, "%d %d", &source, &dest) == 2 && source == prevSource) adjList[edgeCounter++] = dest; prevSource = source; } // Mark the sentinel values so that the degree of any vertex i = offset[i + 1] - offset[i] while (vertexCounter <= vertexCount) offset[vertexCounter++] = edgeCount; // printf("Adjacency List\n"); // for(int i = 0; i < edgeCount; i++) { // printf("%d ", adjList[i]); // } // // printf("\nOffsets\n"); // for(int i = 0; i < vertexCount + 1; i++) { // printf("%d ", offset[i]); // } // printf("\n"); // Initialize other data structure to be used for bfs int *inpFrontier, *outFrontier, *visited, *parent, *treeEdges; int s1, s2; // Size of input and output frontiers int treeEdgeCount = 0; inpFrontier = (int *)calloc(vertexCount, sizeof(int)); outFrontier = (int *)calloc(vertexCount, sizeof(int)); visited = (int *)calloc(vertexCount, sizeof(int)); treeEdges = (int *)calloc(edgeCount, sizeof(int)); parent = (int *)malloc(verticesSize); memset(parent, -1, verticesSize); s1 = 1; s2 = 0; inpFrontier[0] = 0; // Inserting source vertex // Corresponding device data int *d_adjList, *d_offset; int *d_inpFrontier, *d_outFrontier, *d_visited, *d_parent, *d_treeEdges; int *d_s1, *d_s2; cudaMalloc(&d_adjList, adjListSize); cudaMalloc(&d_offset, offsetSize); cudaMalloc(&d_inpFrontier, verticesSize); cudaMalloc(&d_outFrontier, verticesSize); cudaMalloc(&d_visited, verticesSize); cudaMalloc(&d_treeEdges, edgeCount * sizeof(int)); cudaMalloc(&d_parent, verticesSize); cudaMalloc(&d_s1, sizeof(int)); cudaMalloc(&d_s2, sizeof(int)); cudaMemcpy(d_adjList, adjList, adjListSize, cudaMemcpyHostToDevice); cudaMemcpy(d_offset, offset, offsetSize, cudaMemcpyHostToDevice); cudaMemcpy(d_inpFrontier, inpFrontier, verticesSize, cudaMemcpyHostToDevice); cudaMemcpy(d_outFrontier, outFrontier, verticesSize, cudaMemcpyHostToDevice); cudaMemcpy(d_visited, visited, verticesSize, cudaMemcpyHostToDevice); cudaMemcpy(d_treeEdges, treeEdges, edgeCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_parent, parent, verticesSize, 
cudaMemcpyHostToDevice); cudaMemcpy(d_s1, &s1, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_s2, &s2, sizeof(int), cudaMemcpyHostToDevice); // Start the bfs bool odd = true; int inpQSize = s1; numIterations = 0; while (inpQSize != 0) { dim3 blocksPerGrid ((inpQSize + 1023) / 1024); dim3 threadsPerBlock ((inpQSize > 1024) ? 1024 : inpQSize); if (odd) { bfs<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_inpFrontier, d_outFrontier, d_parent, d_visited, d_treeEdges, d_s1, d_s2); cudaMemcpy(&inpQSize, d_s2, sizeof(int), cudaMemcpyDeviceToHost); s1 = 0; cudaMemcpy(d_s1, &s1, sizeof(int), cudaMemcpyHostToDevice); } else { bfs<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_outFrontier, d_inpFrontier, d_parent, d_visited, d_treeEdges, d_s2, d_s1); cudaMemcpy(&inpQSize, d_s1, sizeof(int), cudaMemcpyDeviceToHost); s2 = 0; cudaMemcpy(d_s2, &s2, sizeof(int), cudaMemcpyHostToDevice); } odd = !odd; numIterations++; treeEdgeCount += inpQSize; } cudaMemcpy(visited, d_visited, verticesSize, cudaMemcpyDeviceToHost); cudaMemcpy(parent, d_parent, verticesSize, cudaMemcpyDeviceToHost); cudaMemcpy(treeEdges, d_treeEdges, edgeCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(adjList, d_adjList, edgeCount * sizeof(int), cudaMemcpyDeviceToHost); // printf("Parent array\n"); // for (int i = 0; i < vertexCount; i++) // printf("%d ", parent[i]); // printf("\n"); // // printf("Adjacency List\n"); // for(int i = 0; i < edgeCount; i++) { // printf("(%d %d) ", i, adjList[i]); // } // printf("\n"); // // for (int i = 0; i < vertexCount; i++) { // if (parent[i] != adjList[offset[i]]) // printf("WRONG %d\n", i); // } // // printf("Number of iterations %d \n", numIterations); // printf("Visited array\n"); // for (int i = 0; i < vertexCount; i++) // printf("%d ", visited[i]); // printf("\n"); // printf("Tree Edges\n"); // for (int i = 0; i < edgeCount; i++) // printf("%d ", treeEdges[i]); // printf("\n"); int nonTreeEdgeCount = (edgeCount - 2 * treeEdgeCount) / 2; // printf("treeEdgecount %d\n", treeEdgeCount); // printf("Non-tree edges count %d\n", nonTreeEdgeCount); dim3 blocksPerGrid ((nonTreeEdgeCount + 1023) / 1024); dim3 threadsPerBlock ((nonTreeEdgeCount > 1024) ? 
1024 : nonTreeEdgeCount); int threadCount = blocksPerGrid.x * threadsPerBlock.x; //printf("ThreadCount = %d\n", threadCount); // Data structure to represent non tree edges // a b i : edge a->b with edge id i int *nonTreeEdges = (int *) calloc(3 * nonTreeEdgeCount, sizeof(int)); int *lcaThread = (int *) calloc(threadCount, sizeof(int)); int *threadEdge = (int *) malloc(edgeCount * sizeof(int)); memset(threadEdge, -1, edgeCount * sizeof(int)); int *unfinished = (int *) calloc(vertexCount, sizeof(int)); int *auxAdjList = (int *) malloc(2 * vertexCount * sizeof(int)); memset(auxAdjList, -1, 2 * vertexCount * sizeof(int)); int *artPoint = (int *) calloc(vertexCount, sizeof(int)); int rootLCACount = 0; // Populate non tree edges for (int i = 0, j = 0; i < edgeCount; i++) { if (treeEdges[i] >= 0) { nonTreeEdges[j++] = treeEdges[i]; nonTreeEdges[j++] = adjList[i]; nonTreeEdges[j++] = i; } } /* printf("Non tree edges\n"); for (int i = 0; i < 3 * nonTreeEdgeCount; i+=3) { printf("%d %d %d\n", nonTreeEdges[i], nonTreeEdges[i + 1], nonTreeEdges[i + 2]); }*/ int *d_nonTreeEdges, *d_lcaThread, *d_threadEdge, *d_unfinished, *d_auxAdjList, *d_artPoint, *d_rootLCACount; cudaMalloc(&d_nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int)); cudaMalloc(&d_lcaThread, threadCount * sizeof(int)); cudaMalloc(&d_threadEdge, edgeCount * sizeof(int)); cudaMalloc(&d_unfinished, vertexCount * sizeof(int)); cudaMalloc(&d_auxAdjList, 2 * vertexCount * sizeof(int)); cudaMalloc(&d_artPoint, vertexCount * sizeof(int)); cudaMalloc(&d_rootLCACount, sizeof(int)); cudaMemcpy(d_nonTreeEdges, nonTreeEdges, 3 * nonTreeEdgeCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_lcaThread, lcaThread, threadCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_threadEdge, threadEdge, edgeCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_unfinished, unfinished, vertexCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_auxAdjList, auxAdjList, 2 * vertexCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_artPoint, artPoint, vertexCount * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_rootLCACount, &rootLCACount, sizeof(int), cudaMemcpyHostToDevice); lca<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_parent, d_nonTreeEdges, d_unfinished, d_threadEdge, d_lcaThread, d_auxAdjList, vertexCount); auxGraph<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_lcaThread, vertexCount, d_rootLCACount, d_auxAdjList); int *threadEdge1 = (int *) malloc((edgeCount + vertexCount) * sizeof(int)); memset(threadEdge1, -1, (edgeCount + vertexCount) * sizeof(int)); int *d_threadEdge1; cudaMalloc(&d_threadEdge1, (edgeCount + vertexCount) * sizeof(int)); cudaMemcpy(d_threadEdge1, threadEdge1, (edgeCount + vertexCount) * sizeof(int), cudaMemcpyHostToDevice); lca1<<<blocksPerGrid, threadsPerBlock>>>(d_adjList, d_offset, d_parent, d_nonTreeEdges, d_unfinished, d_threadEdge1, d_lcaThread, d_auxAdjList, vertexCount, edgeCount); cudaMemcpy(threadEdge1, d_threadEdge1, (edgeCount + vertexCount) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(lcaThread, d_lcaThread, threadCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(threadEdge, d_threadEdge, edgeCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(unfinished, d_unfinished, vertexCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(adjList, d_adjList, edgeCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(auxAdjList, d_auxAdjList, 2 * vertexCount * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(artPoint, d_artPoint, vertexCount * 
sizeof(int), cudaMemcpyDeviceToHost); /* printf("LCA Thread\n"); for (int i = 0; i < threadCount; i++) printf("%d ", lcaThread[i]); printf("\nthread Edge\n"); for (int i = 0; i < edgeCount; i++) printf("%d ", threadEdge[i]); printf("\n unfinished \n"); for (int i = 0; i < vertexCount; i++) printf("%d ", unfinished[i]); printf("\n Adj List\n"); for (int i = 0; i < edgeCount; i++) printf("%d ", adjList[i]); printf("\n Aux Adj List \n"); for (int i = 0; i < 2 * vertexCount; i+=2) printf("%d %d\n", auxAdjList[i], auxAdjList[i + 1]); printf("\n Art Point \n"); for (int i = 0; i < vertexCount; i++) printf("%d %d\n", i, artPoint[i]); printf("\n THREAD EDGE\n"); for (int i = 0; i < (edgeCount + vertexCount); i++) printf("%d ", threadEdge1[i]); */ printf("\n"); for (int i = 0; i < threadCount; i++) { if (threadEdge1[offset[lcaThread[i]]] == -1) printf("%d ", lcaThread[i]); } printf("\n"); // Free allocated memory on device and host cudaFree(d_adjList); cudaFree(d_offset); cudaFree(d_inpFrontier); cudaFree(d_outFrontier); cudaFree(d_visited); cudaFree(d_treeEdges); cudaFree(d_parent); cudaFree(d_s1); cudaFree(d_s2); cudaFree(d_nonTreeEdges); cudaFree(d_lcaThread); cudaFree(d_threadEdge); cudaFree(d_unfinished); cudaFree(d_artPoint); cudaFree(d_rootLCACount); free(inpFrontier); free(outFrontier); free(visited); free(treeEdges); free(parent); free(nonTreeEdges); free(lcaThread); free(threadEdge); free(unfinished); free(artPoint); return 0; }
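A brief note on the launch bookkeeping in the program above: the (count + 1023) / 1024 grid computation is repeated for the BFS loop and the LCA kernels, and no kernel launch is followed by an error check. Below is a minimal sketch of how that could be factored out; gridFor, blockFor, and checkLaunch are illustrative names and are not part of the original program.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helpers: a 1-D launch shape for count work items, capped at
// 1024 threads per block, plus a check for kernel launch errors.
static inline dim3 gridFor(int count)  { return dim3((count + 1023) / 1024); }
static inline dim3 blockFor(int count) { return dim3(count > 1024 ? 1024 : count); }

static inline void checkLaunch(const char *what) {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "%s launch failed: %s\n", what, cudaGetErrorString(err));
}

Under that assumption, the frontier loop could launch with bfs<<<gridFor(inpQSize), blockFor(inpQSize)>>>(...) and call checkLaunch("bfs") immediately afterwards, without changing the kernels themselves.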
c897959016f9febdd0d637484a65e1ffb17833c9.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/DistributionTemplates.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <cstdint> #include <limits> #include <utility> #include <type_traits> namespace at { namespace native { void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) { auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator()); at::native::templates::cuda::geometric_kernel(iter, p_, generator); } REGISTER_DISPATCH(geometric_stub, &geometric_kernel); }} // namespace at::native
c897959016f9febdd0d637484a65e1ffb17833c9.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/DistributionTemplates.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <cstdint> #include <limits> #include <utility> #include <type_traits> namespace at { namespace native { void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional<Generator> gen) { auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator()); at::native::templates::cuda::geometric_kernel(iter, p_, generator); } REGISTER_DISPATCH(geometric_stub, &geometric_kernel); }} // namespace at::native
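This pair is a compact illustration of what the conversion actually touches: the three cuRAND headers in the .cu file (curand.h, curand_kernel.h, curand_philox4x32_x.h) all map onto hipRAND headers, which is why hiprand/hiprand_kernel.h ends up included twice in the .hip file, the ATen/cuda includes become their ATen/hip counterparts, and the kernel registration code is left untouched. For readers new to the tool, the snippet below is a minimal, self-contained example of the kind of rewriting hipify performs on runtime calls and kernel launches; it comes from neither file above.

#include <cuda_runtime.h>

__global__ void scale(float *buf, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] *= 2.0f;
}

// CUDA form; hipify typically rewrites cudaMalloc -> hipMalloc, cudaMemcpy ->
// hipMemcpy, cudaFree -> hipFree, and the <<<...>>> launch into
// hipLaunchKernelGGL(scale, grid, block, 0, 0, d_buf, n).
void scale_on_device(float *h_buf, int n) {
    if (n <= 0) return;
    float *d_buf = nullptr;
    cudaMalloc(&d_buf, n * sizeof(float));
    cudaMemcpy(d_buf, h_buf, n * sizeof(float), cudaMemcpyHostToDevice);
    scale<<<(n + 255) / 256, 256>>>(d_buf, n);
    cudaMemcpy(h_buf, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_buf);
}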
2bd89a243ba48f0cf768cc958717daf4f465dc4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuta/reduce.cuh" namespace cuta { namespace reduce { namespace { template <typename T> __device__ T warpSum(T value) { for (int i = warpSize / 2; i >= 1; i /= 2) { // Butterfly Reduction (This is beautiful.) value += __shfl_xor_sync(0xffffffff, value, i, warpSize); } return value; } } // namespace template <typename T> __global__ void sum(T *out_dev, T *in_dev, unsigned int count) { T psum{0}; // Grid-Stride Loop for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) { psum += in_dev[i]; } psum = warpSum(psum); if ((threadIdx.x & (warpSize - 1)) == 0) { atomicAdd(out_dev, psum); } } } // namespace reduce } // namespace cuta template __device__ int cuta::reduce::warpSum<int>(int); template __device__ float cuta::reduce::warpSum<float>(float); template __device__ double cuta::reduce::warpSum<double>(double); template __global__ void cuta::reduce::sum<int>(int *, int *, unsigned int); template __global__ void cuta::reduce::sum<float>(float *, float *, unsigned int); #if __CUDA_ARCH__ >= 600 template __global__ void cuta::reduce::sum<double>(double *, double *, unsigned int); #endif
2bd89a243ba48f0cf768cc958717daf4f465dc4d.cu
#include "cuta/reduce.cuh" namespace cuta { namespace reduce { namespace { template <typename T> __device__ T warpSum(T value) { for (int i = warpSize / 2; i >= 1; i /= 2) { // Butterfly Reduction (This is beautiful.) value += __shfl_xor_sync(0xffffffff, value, i, warpSize); } return value; } } // namespace template <typename T> __global__ void sum(T *out_dev, T *in_dev, unsigned int count) { T psum{0}; // Grid-Stride Loop for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) { psum += in_dev[i]; } psum = warpSum(psum); if ((threadIdx.x & (warpSize - 1)) == 0) { atomicAdd(out_dev, psum); } } } // namespace reduce } // namespace cuta template __device__ int cuta::reduce::warpSum<int>(int); template __device__ float cuta::reduce::warpSum<float>(float); template __device__ double cuta::reduce::warpSum<double>(double); template __global__ void cuta::reduce::sum<int>(int *, int *, unsigned int); template __global__ void cuta::reduce::sum<float>(float *, float *, unsigned int); #if __CUDA_ARCH__ >= 600 template __global__ void cuta::reduce::sum<double>(double *, double *, unsigned int); #endif
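This reduction pair needs no conversion beyond the generated comment and the hip_runtime include, since warp shuffles, grid-stride loops, and atomicAdd exist on both platforms. The sketch below shows how the sum kernel is meant to be driven from the host; the wrapper is illustrative and assumes cuta/reduce.cuh declares the template. The output scalar must be zeroed first because the kernel accumulates into it with atomicAdd.

#include <cuda_runtime.h>
#include "cuta/reduce.cuh"   // assumed to declare cuta::reduce::sum

// Illustrative host driver, not part of the library.
float sum_on_device(const float *h_in, unsigned int count) {
    if (count == 0) return 0.0f;
    float *d_in = nullptr, *d_out = nullptr, h_out = 0.0f;
    cudaMalloc(&d_in, count * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, count * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(float));   // sum() adds partial results into *d_out
    cuta::reduce::sum<float><<<(count + 255) / 256, 256>>>(d_out, d_in, count);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return h_out;
}

The __CUDA_ARCH__ >= 600 guard around the double instantiation reflects that hardware atomicAdd on double requires compute capability 6.0 or newer; the .hip copy carries that macro over verbatim, which may not be the guard one would choose when compiling for AMD targets.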
6ee7109d622c57f9752e01ec80b01657dbc073ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) #define INDEX(b,c,h,w,channels,height,width) ((b * channels + c) * height + h) * width+ w extern "C" __global__ void IRNNBackward( float* grad_input, float* grad_weight_up_map, float* grad_weight_right_map, float* grad_weight_down_map, float* grad_weight_left_map, float* grad_bias_up_map, float* grad_bias_right_map, float* grad_bias_down_map, float* grad_bias_left_map, const float* weight_up, const float* weight_right, const float* weight_down, const float* weight_left, const float* grad_output_up, const float* grad_output_right, const float* grad_output_down, const float* grad_output_left, const float* output_up, const float* output_right, const float* output_down, const float* output_left, const int channels, const int height, const int width, const int n) { CUDA_KERNEL_LOOP(index,n){ int w = index % width; int h = index / width % height; int c = index / width / height % channels; int b = index / width / height / channels; float diff_left = 0; float diff_right = 0; float diff_up = 0; float diff_down = 0; //left for (int i = 0; i<=w; i++) { diff_left *= weight_left[c]; diff_left += grad_output_left[INDEX(b, c, h, i, channels, height, width)]; diff_left *= (output_left[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1; } float temp = grad_output_left[INDEX(b, c, h, 0, channels, height, width)]; for (int i = 1; i < w +1 ; i++) { temp = (output_left[INDEX(b, c, h, i-1, channels, height, width)] >0?1:0) * temp * weight_left[c] + grad_output_left[INDEX(b, c, h, i, channels, height, width)]; } if (w != width - 1){ grad_weight_left_map[index] = temp * output_left[INDEX(b, c, h, w+1, channels, height, width)] * (output_left[index] > 0? 1:0); grad_bias_left_map[index] = diff_left; } // right for (int i = width -1; i>=w; i--) { diff_right *= weight_right[c]; diff_right += grad_output_right[INDEX(b, c, h, i, channels, height, width)]; diff_right *= (output_right[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1; } temp = grad_output_right[INDEX(b, c, h, width-1, channels, height, width)]; for (int i = width -2; i > w - 1 ; i--) { temp = (output_right[INDEX(b, c, h, i+1, channels, height, width)] >0?1:0) * temp * weight_right[c] + grad_output_right[INDEX(b, c, h, i, channels, height, width)]; } if (w != 0){ grad_weight_right_map[index] = temp * output_right[INDEX(b, c, h, w-1, channels, height, width)] * (output_right[index] > 0? 1:0); grad_bias_right_map[index] = diff_right; } // up for (int i = 0; i<=h; i++) { diff_up *= weight_up[c]; diff_up += grad_output_up[INDEX(b, c, i, w, channels, height, width)]; diff_up *= (output_up[INDEX(b, c, i, w, channels, height, width)]<=0)? 0 : 1; } temp = grad_output_up[INDEX(b, c, 0, w, channels, height, width)]; for (int i = 1; i < h +1 ; i++) { temp = (output_up[INDEX(b, c, i-1, w, channels, height, width)] >0?1:0) * temp * weight_up[c] + grad_output_up[INDEX(b, c, i, w, channels, height, width)]; } if (h != height - 1){ grad_weight_up_map[index] = temp * output_up[INDEX(b, c, h+1, w, channels, height, width)] * (output_up[index] > 0? 1:0); grad_bias_up_map[index] = diff_up; } // down for (int i = height -1; i>=h; i--) { diff_down *= weight_down[c]; diff_down += grad_output_down[INDEX(b, c, i, w, channels, height, width)]; diff_down *= (output_down[INDEX(b, c, i, w, channels, height, width)]<=0)? 
0 : 1; } temp = grad_output_down[INDEX(b, c, height-1, w, channels, height, width)]; for (int i = height -2; i > h - 1 ; i--) { temp = (output_down[INDEX(b, c, i+1, w, channels, height, width)] >0?1:0) * temp * weight_down[c] + grad_output_down[INDEX(b, c, i, w, channels, height, width)]; } if (h != 0){ grad_weight_down_map[index] = temp * output_down[INDEX(b, c, h-1, w, channels, height, width)] * (output_down[index] > 0? 1:0); grad_bias_down_map[index] = diff_down; } grad_input[index] = diff_down + diff_left + diff_right + diff_up; } }
6ee7109d622c57f9752e01ec80b01657dbc073ed.cu
#define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) #define INDEX(b,c,h,w,channels,height,width) ((b * channels + c) * height + h) * width+ w extern "C" __global__ void IRNNBackward( float* grad_input, float* grad_weight_up_map, float* grad_weight_right_map, float* grad_weight_down_map, float* grad_weight_left_map, float* grad_bias_up_map, float* grad_bias_right_map, float* grad_bias_down_map, float* grad_bias_left_map, const float* weight_up, const float* weight_right, const float* weight_down, const float* weight_left, const float* grad_output_up, const float* grad_output_right, const float* grad_output_down, const float* grad_output_left, const float* output_up, const float* output_right, const float* output_down, const float* output_left, const int channels, const int height, const int width, const int n) { CUDA_KERNEL_LOOP(index,n){ int w = index % width; int h = index / width % height; int c = index / width / height % channels; int b = index / width / height / channels; float diff_left = 0; float diff_right = 0; float diff_up = 0; float diff_down = 0; //left for (int i = 0; i<=w; i++) { diff_left *= weight_left[c]; diff_left += grad_output_left[INDEX(b, c, h, i, channels, height, width)]; diff_left *= (output_left[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1; } float temp = grad_output_left[INDEX(b, c, h, 0, channels, height, width)]; for (int i = 1; i < w +1 ; i++) { temp = (output_left[INDEX(b, c, h, i-1, channels, height, width)] >0?1:0) * temp * weight_left[c] + grad_output_left[INDEX(b, c, h, i, channels, height, width)]; } if (w != width - 1){ grad_weight_left_map[index] = temp * output_left[INDEX(b, c, h, w+1, channels, height, width)] * (output_left[index] > 0? 1:0); grad_bias_left_map[index] = diff_left; } // right for (int i = width -1; i>=w; i--) { diff_right *= weight_right[c]; diff_right += grad_output_right[INDEX(b, c, h, i, channels, height, width)]; diff_right *= (output_right[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1; } temp = grad_output_right[INDEX(b, c, h, width-1, channels, height, width)]; for (int i = width -2; i > w - 1 ; i--) { temp = (output_right[INDEX(b, c, h, i+1, channels, height, width)] >0?1:0) * temp * weight_right[c] + grad_output_right[INDEX(b, c, h, i, channels, height, width)]; } if (w != 0){ grad_weight_right_map[index] = temp * output_right[INDEX(b, c, h, w-1, channels, height, width)] * (output_right[index] > 0? 1:0); grad_bias_right_map[index] = diff_right; } // up for (int i = 0; i<=h; i++) { diff_up *= weight_up[c]; diff_up += grad_output_up[INDEX(b, c, i, w, channels, height, width)]; diff_up *= (output_up[INDEX(b, c, i, w, channels, height, width)]<=0)? 0 : 1; } temp = grad_output_up[INDEX(b, c, 0, w, channels, height, width)]; for (int i = 1; i < h +1 ; i++) { temp = (output_up[INDEX(b, c, i-1, w, channels, height, width)] >0?1:0) * temp * weight_up[c] + grad_output_up[INDEX(b, c, i, w, channels, height, width)]; } if (h != height - 1){ grad_weight_up_map[index] = temp * output_up[INDEX(b, c, h+1, w, channels, height, width)] * (output_up[index] > 0? 1:0); grad_bias_up_map[index] = diff_up; } // down for (int i = height -1; i>=h; i--) { diff_down *= weight_down[c]; diff_down += grad_output_down[INDEX(b, c, i, w, channels, height, width)]; diff_down *= (output_down[INDEX(b, c, i, w, channels, height, width)]<=0)? 
0 : 1; } temp = grad_output_down[INDEX(b, c, height-1, w, channels, height, width)]; for (int i = height -2; i > h - 1 ; i--) { temp = (output_down[INDEX(b, c, i+1, w, channels, height, width)] >0?1:0) * temp * weight_down[c] + grad_output_down[INDEX(b, c, i, w, channels, height, width)]; } if (h != 0){ grad_weight_down_map[index] = temp * output_down[INDEX(b, c, h-1, w, channels, height, width)] * (output_down[index] > 0? 1:0); grad_bias_down_map[index] = diff_down; } grad_input[index] = diff_down + diff_left + diff_right + diff_up; } }
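The backward kernel above decodes one flat NCHW index per thread and re-encodes neighbouring positions with the INDEX macro, so the total work size is n = batch * channels * height * width and, under the CUDA_KERNEL_LOOP grid-stride convention, a 1-D grid of (n + 255) / 256 blocks of 256 threads covers it. The following is a small host-side round-trip check of that indexing scheme, a standalone sketch rather than part of the kernel's sources.

#include <cassert>

// Decode a flat NCHW index into (b, c, h, w) exactly as the kernel does, then
// re-encode it the way the INDEX macro does; the two must agree everywhere.
static void check_nchw_indexing(int batch, int channels, int height, int width) {
    for (int index = 0; index < batch * channels * height * width; ++index) {
        int w = index % width;
        int h = index / width % height;
        int c = index / width / height % channels;
        int b = index / width / height / channels;
        assert((((b * channels + c) * height + h) * width + w) == index);
    }
}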
baeca4886525297273f3b89c8c8132cb442db9e7.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include <ATen/hip/HIPContext.h> #include <torch/script.h> #include <vector> #include "open3d/ml/impl/sparse_conv/SparseConvTranspose.cuh" #include "open3d/ml/pytorch/TorchHelper.h" using namespace open3d::ml::impl; template <class TFeat, class TOut, class TIndex, class TKernelIndex> void SparseConvTransposeCUDA(const torch::Tensor& filters, const torch::Tensor& out_importance, const torch::Tensor& inp_features, const torch::Tensor& inp_neighbors_importance_sum, const torch::Tensor& inp_neighbors_row_splits, const torch::Tensor& neighbors_index, const torch::Tensor& neighbors_kernel_index, const torch::Tensor& neighbors_importance, const torch::Tensor& neighbors_row_splits, const bool normalize, const int64_t max_temp_mem_MB, torch::Tensor& out_features) { std::vector<int> filter_dims; for (auto d : filters.sizes()) { filter_dims.push_back(d); } auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; auto device = filters.device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvTransposeComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.data_ptr<TOut>(), filter_dims, filters.data_ptr<TFeat>(), neighbors_row_splits.size(0) - 1, out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr, inp_features.size(0), inp_features.data_ptr<TFeat>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TFeat>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_kernel_index.data_ptr<TKernelIndex>(), neighbors_importance.size(0) ? 
neighbors_importance.data_ptr<TFeat>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), normalize); temp_size = ::max( ::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually run the operation SparseConvTransposeComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.data_ptr<TOut>(), filter_dims, filters.data_ptr<TFeat>(), neighbors_row_splits.size(0) - 1, out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr, inp_features.size(0), inp_features.data_ptr<TFeat>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TFeat>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_kernel_index.data_ptr<TKernelIndex>(), neighbors_importance.size(0) ? neighbors_importance.data_ptr<TFeat>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), normalize); } #define INSTANTIATE(TFeat, TOut, TIndex, TKernelIndex) \ template void SparseConvTransposeCUDA<TFeat, TOut, TIndex, TKernelIndex>( \ const torch::Tensor& filters, const torch::Tensor& out_importance, \ const torch::Tensor& inp_features, \ const torch::Tensor& inp_neighbors_importance_sum, \ const torch::Tensor& inp_neighbors_row_splits, \ const torch::Tensor& neighbors_index, \ const torch::Tensor& neighbors_kernel_index, \ const torch::Tensor& neighbors_importance, \ const torch::Tensor& neighbors_row_splits, const bool normalize, \ const int64_t max_temp_mem_MB, torch::Tensor& out_features); INSTANTIATE(float, float, int32_t, uint8_t)
baeca4886525297273f3b89c8c8132cb442db9e7.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include <ATen/cuda/CUDAContext.h> #include <torch/script.h> #include <vector> #include "open3d/ml/impl/sparse_conv/SparseConvTranspose.cuh" #include "open3d/ml/pytorch/TorchHelper.h" using namespace open3d::ml::impl; template <class TFeat, class TOut, class TIndex, class TKernelIndex> void SparseConvTransposeCUDA(const torch::Tensor& filters, const torch::Tensor& out_importance, const torch::Tensor& inp_features, const torch::Tensor& inp_neighbors_importance_sum, const torch::Tensor& inp_neighbors_row_splits, const torch::Tensor& neighbors_index, const torch::Tensor& neighbors_kernel_index, const torch::Tensor& neighbors_importance, const torch::Tensor& neighbors_row_splits, const bool normalize, const int64_t max_temp_mem_MB, torch::Tensor& out_features) { std::vector<int> filter_dims; for (auto d : filters.sizes()) { filter_dims.push_back(d); } auto stream = at::cuda::getCurrentCUDAStream(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; auto device = filters.device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvTransposeComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.data_ptr<TOut>(), filter_dims, filters.data_ptr<TFeat>(), neighbors_row_splits.size(0) - 1, out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr, inp_features.size(0), inp_features.data_ptr<TFeat>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TFeat>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_kernel_index.data_ptr<TKernelIndex>(), neighbors_importance.size(0) ? 
neighbors_importance.data_ptr<TFeat>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), normalize); temp_size = std::max( std::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually run the operation SparseConvTransposeComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.data_ptr<TOut>(), filter_dims, filters.data_ptr<TFeat>(), neighbors_row_splits.size(0) - 1, out_importance.size(0) ? out_importance.data_ptr<TFeat>() : nullptr, inp_features.size(0), inp_features.data_ptr<TFeat>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TFeat>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_kernel_index.data_ptr<TKernelIndex>(), neighbors_importance.size(0) ? neighbors_importance.data_ptr<TFeat>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), normalize); } #define INSTANTIATE(TFeat, TOut, TIndex, TKernelIndex) \ template void SparseConvTransposeCUDA<TFeat, TOut, TIndex, TKernelIndex>( \ const torch::Tensor& filters, const torch::Tensor& out_importance, \ const torch::Tensor& inp_features, \ const torch::Tensor& inp_neighbors_importance_sum, \ const torch::Tensor& inp_neighbors_row_splits, \ const torch::Tensor& neighbors_index, \ const torch::Tensor& neighbors_kernel_index, \ const torch::Tensor& neighbors_importance, \ const torch::Tensor& neighbors_row_splits, const bool normalize, \ const int64_t max_temp_mem_MB, torch::Tensor& out_features); INSTANTIATE(float, float, int32_t, uint8_t)
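Both versions of this Open3D binding use the same two-pass workspace protocol: the compute function is first called with a null temp_ptr purely to report temp_size and max_temp_size, the caller clamps the request against the max_temp_mem_MB budget, allocates, and then calls again to do the real work. (The .hip copy also shows the converter rewriting std::max and std::min into ::max and ::min.) Below is a generic sketch of that protocol with a hypothetical Worker callable standing in for SparseConvTransposeComputeFeaturesCUDA; the real binding allocates through a temporary torch tensor rather than cudaMalloc.

#include <cuda_runtime.h>
#include <algorithm>
#include <cstddef>

// Pass 1 queries the workspace size, pass 2 runs with the allocated buffer.
// Worker is any callable taking (void*, size_t&, size_t&).
template <class Worker>
void run_with_workspace(Worker worker, size_t max_temp_mem_bytes) {
    void  *temp_ptr = nullptr;
    size_t temp_size = 0, max_temp_size = 0;
    worker(temp_ptr, temp_size, max_temp_size);                        // query only
    temp_size = std::max(std::min(max_temp_mem_bytes, max_temp_size), temp_size);
    cudaMalloc(&temp_ptr, temp_size);
    worker(temp_ptr, temp_size, max_temp_size);                        // real run
    cudaFree(temp_ptr);
}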
505720fe1c937042fb77f10b976ab144996e73cf.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <random> #include "SingleGPUMGDMLP.h" extern char* LOG_DIRECTORY; using std::ios; SingleGPUMGDMLPImp::SingleGPUMGDMLPImp(EventProcessor& _scheduler) { ofDebug.open((string(LOG_DIRECTORY) + "SingleGPU-MGDMLP.log").c_str(), ios::out); // the scheduler scheduler.CopyFrom(_scheduler); // register messages RegisterMessageProcessor(SingleGPUMGDMLP_RunDataLoadMessage::type, &RunDataLoad, 100); RegisterMessageProcessor(SingleGPUMGDMLP_RunLossMessage::type, &RunLoss, 100); RegisterMessageProcessor(SingleGPUMGDMLP_RunTrainMessage::type, &RunTrain, 100); RegisterMessageProcessor(DieMessage::type, &newDieHandler, 100); } SingleGPUMGDMLPImp::~SingleGPUMGDMLPImp() { for(int i = 0; i < nn->num_grad; i++){ hipFree(d_weight[i]); } for(int i = 0; i < local_batches; i++){ hipFree(d_y[i][0]); hipFree(d_label[i]); hipFree(d_rsum[i]); hipFree(d_prodlog[i]); for(int j = 0; j < nn->num_grad; j++){ hipFree(d_y[i][j+1]); hipFree(d_gradient[i][j]); hipFree(d_dlossy[i][j]); hipFree(d_dlossx[i][j]); } } ofDebug.close(); } void SingleGPUMGDMLPImp::Init(InputData* _data, HyperPara* _hypar, NeuralNets* _nn, bool _isMGD, MatModel* _model) { gpu_idx = 0; data = _data; hypar = _hypar; nn = _nn; gpu_model = _model; isMGD = _isMGD; local_batches = isMGD? 1 : hypar->num_batches; d_weight.resize(nn->num_grad); d_gradient.resize(local_batches); d_y.resize(local_batches); d_label.resize(local_batches); d_dlossy.resize(local_batches); d_dlossx.resize(local_batches); d_rsum.resize(local_batches); d_prodlog.resize(local_batches); loss = 0.; //stepsize = hypar->N_0 / data->num_tuples * hypar->batch_size; stepsize = hypar->N_0; #pragma omp parallel for schedule(dynamic) for(int i = 0; i < local_batches; i++){ d_gradient[i].resize(nn->num_grad); d_y[i].resize(nn->num_layers); d_dlossy[i].resize(nn->num_grad); d_dlossx[i].resize(nn->num_grad); } for(int i = 0; i < nn->num_grad; i++){ hipMalloc(&d_weight[i], sizeof(double)*nn->num_units[i]*nn->num_units[i+1]); hipMemcpy(d_weight[i], gpu_model->weight[i], sizeof(double)*nn->num_units[i]*nn->num_units[i+1], hipMemcpyHostToDevice); } } MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunDataLoad, SingleGPUMGDMLP_RunDataLoadMessage) { int s_idx = msg.start_idx; int b_idx = 0; int n = msg.processed_tuples; if(msg.re_allocated){ // allocate GPU memory hipMalloc(&evProc.d_y[b_idx][0], sizeof(double)*n*evProc.data->gradient_size); hipMalloc(&evProc.d_label[b_idx], sizeof(double)*n*evProc.data->num_classes); hipMalloc(&evProc.d_rsum[b_idx], sizeof(double)*n); hipMalloc(&evProc.d_prodlog[b_idx], sizeof(double)*n*evProc.data->num_classes); for(int j = 0; j < evProc.nn->num_grad; j++){ hipMalloc(&evProc.d_gradient[b_idx][j], sizeof(double)*evProc.nn->num_units[j]*evProc.nn->num_units[j+1]); hipMalloc(&evProc.d_y[b_idx][j+1],sizeof(double)*n*evProc.nn->num_units[j+1]); hipMalloc(&evProc.d_dlossy[b_idx][j],sizeof(double)*n*evProc.nn->num_units[j+1]); hipMalloc(&evProc.d_dlossx[b_idx][j],sizeof(double)*n*evProc.nn->num_units[j+1]); } } Timer t_timer; t_timer.Restart(); hipMemcpy(evProc.d_y[b_idx][0], &evProc.data->h_data[s_idx*evProc.data->gradient_size], sizeof(double)*n*evProc.data->gradient_size, hipMemcpyHostToDevice); hipMemcpy(evProc.d_label[b_idx], &evProc.data->h_label[s_idx*evProc.data->num_classes], sizeof(double)*n*evProc.data->num_classes, hipMemcpyHostToDevice); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) RunLoad time = " << t_time << endl; evProc.ofDebug.flush(); if(msg.taskId 
== 0) SingleGPUMGDMLP_ComputeBatchedLossMessage_Factory(evProc.scheduler, n); else SingleGPUMGDMLP_TrainBatchedDataMessage_Factory(evProc.scheduler, n); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunLoss, SingleGPUMGDMLP_RunLossMessage) { int b_idx = 0; int n = msg.processed_tuples; hipblasHandle_t handle; hipblasCreate(&handle); Timer t_timer; t_timer.Restart(); evProc.loss = n*get_loss_bgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn->num_units, n, evProc.d_rsum[b_idx], evProc.d_prodlog[b_idx], evProc.alpha, evProc.beta, evProc.gpu_idx); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) loss = " << evProc.loss << ", RunLoss time = " << t_time << endl; evProc.ofDebug.flush(); hipblasDestroy(handle); SingleGPUMGDMLP_LoadNextDataMessage_Factory(evProc.scheduler, evProc.loss, 0); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunTrain, SingleGPUMGDMLP_RunTrainMessage) { int b_idx = 0; int n = msg.processed_tuples; hipblasHandle_t handle; hipblasCreate(&handle); Timer t_timer; t_timer.Restart(); forward_mgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn->num_units, n, evProc.d_rsum[b_idx], evProc.alpha, evProc.beta, evProc.gpu_idx); compute_gradient_bgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn, n, evProc.stepsize, evProc.alpha, evProc.beta, evProc.d_gradient[b_idx], evProc.d_dlossy[b_idx], evProc.d_dlossx[b_idx], evProc.gpu_idx); update_model_bgd_cublas(evProc.d_weight, evProc.d_gradient[b_idx], evProc.nn, evProc.stepsize, evProc.gpu_idx); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) RunTrain time = " << t_time << endl; hipblasDestroy(handle); SingleGPUMGDMLP_LoadNextDataMessage_Factory(evProc.scheduler, 0., 1); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, newDieHandler, DieMessage) return true; }
505720fe1c937042fb77f10b976ab144996e73cf.cu
#include <math.h> #include <random> #include "SingleGPUMGDMLP.h" extern char* LOG_DIRECTORY; using std::ios; SingleGPUMGDMLPImp::SingleGPUMGDMLPImp(EventProcessor& _scheduler) { ofDebug.open((string(LOG_DIRECTORY) + "SingleGPU-MGDMLP.log").c_str(), ios::out); // the scheduler scheduler.CopyFrom(_scheduler); // register messages RegisterMessageProcessor(SingleGPUMGDMLP_RunDataLoadMessage::type, &RunDataLoad, 100); RegisterMessageProcessor(SingleGPUMGDMLP_RunLossMessage::type, &RunLoss, 100); RegisterMessageProcessor(SingleGPUMGDMLP_RunTrainMessage::type, &RunTrain, 100); RegisterMessageProcessor(DieMessage::type, &newDieHandler, 100); } SingleGPUMGDMLPImp::~SingleGPUMGDMLPImp() { for(int i = 0; i < nn->num_grad; i++){ cudaFree(d_weight[i]); } for(int i = 0; i < local_batches; i++){ cudaFree(d_y[i][0]); cudaFree(d_label[i]); cudaFree(d_rsum[i]); cudaFree(d_prodlog[i]); for(int j = 0; j < nn->num_grad; j++){ cudaFree(d_y[i][j+1]); cudaFree(d_gradient[i][j]); cudaFree(d_dlossy[i][j]); cudaFree(d_dlossx[i][j]); } } ofDebug.close(); } void SingleGPUMGDMLPImp::Init(InputData* _data, HyperPara* _hypar, NeuralNets* _nn, bool _isMGD, MatModel* _model) { gpu_idx = 0; data = _data; hypar = _hypar; nn = _nn; gpu_model = _model; isMGD = _isMGD; local_batches = isMGD? 1 : hypar->num_batches; d_weight.resize(nn->num_grad); d_gradient.resize(local_batches); d_y.resize(local_batches); d_label.resize(local_batches); d_dlossy.resize(local_batches); d_dlossx.resize(local_batches); d_rsum.resize(local_batches); d_prodlog.resize(local_batches); loss = 0.; //stepsize = hypar->N_0 / data->num_tuples * hypar->batch_size; stepsize = hypar->N_0; #pragma omp parallel for schedule(dynamic) for(int i = 0; i < local_batches; i++){ d_gradient[i].resize(nn->num_grad); d_y[i].resize(nn->num_layers); d_dlossy[i].resize(nn->num_grad); d_dlossx[i].resize(nn->num_grad); } for(int i = 0; i < nn->num_grad; i++){ cudaMalloc(&d_weight[i], sizeof(double)*nn->num_units[i]*nn->num_units[i+1]); cudaMemcpy(d_weight[i], gpu_model->weight[i], sizeof(double)*nn->num_units[i]*nn->num_units[i+1], cudaMemcpyHostToDevice); } } MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunDataLoad, SingleGPUMGDMLP_RunDataLoadMessage) { int s_idx = msg.start_idx; int b_idx = 0; int n = msg.processed_tuples; if(msg.re_allocated){ // allocate GPU memory cudaMalloc(&evProc.d_y[b_idx][0], sizeof(double)*n*evProc.data->gradient_size); cudaMalloc(&evProc.d_label[b_idx], sizeof(double)*n*evProc.data->num_classes); cudaMalloc(&evProc.d_rsum[b_idx], sizeof(double)*n); cudaMalloc(&evProc.d_prodlog[b_idx], sizeof(double)*n*evProc.data->num_classes); for(int j = 0; j < evProc.nn->num_grad; j++){ cudaMalloc(&evProc.d_gradient[b_idx][j], sizeof(double)*evProc.nn->num_units[j]*evProc.nn->num_units[j+1]); cudaMalloc(&evProc.d_y[b_idx][j+1],sizeof(double)*n*evProc.nn->num_units[j+1]); cudaMalloc(&evProc.d_dlossy[b_idx][j],sizeof(double)*n*evProc.nn->num_units[j+1]); cudaMalloc(&evProc.d_dlossx[b_idx][j],sizeof(double)*n*evProc.nn->num_units[j+1]); } } Timer t_timer; t_timer.Restart(); cudaMemcpy(evProc.d_y[b_idx][0], &evProc.data->h_data[s_idx*evProc.data->gradient_size], sizeof(double)*n*evProc.data->gradient_size, cudaMemcpyHostToDevice); cudaMemcpy(evProc.d_label[b_idx], &evProc.data->h_label[s_idx*evProc.data->num_classes], sizeof(double)*n*evProc.data->num_classes, cudaMemcpyHostToDevice); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) RunLoad time = " << t_time << endl; evProc.ofDebug.flush(); if(msg.taskId == 0) 
SingleGPUMGDMLP_ComputeBatchedLossMessage_Factory(evProc.scheduler, n); else SingleGPUMGDMLP_TrainBatchedDataMessage_Factory(evProc.scheduler, n); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunLoss, SingleGPUMGDMLP_RunLossMessage) { int b_idx = 0; int n = msg.processed_tuples; cublasHandle_t handle; cublasCreate(&handle); Timer t_timer; t_timer.Restart(); evProc.loss = n*get_loss_bgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn->num_units, n, evProc.d_rsum[b_idx], evProc.d_prodlog[b_idx], evProc.alpha, evProc.beta, evProc.gpu_idx); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) loss = " << evProc.loss << ", RunLoss time = " << t_time << endl; evProc.ofDebug.flush(); cublasDestroy(handle); SingleGPUMGDMLP_LoadNextDataMessage_Factory(evProc.scheduler, evProc.loss, 0); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, RunTrain, SingleGPUMGDMLP_RunTrainMessage) { int b_idx = 0; int n = msg.processed_tuples; cublasHandle_t handle; cublasCreate(&handle); Timer t_timer; t_timer.Restart(); forward_mgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn->num_units, n, evProc.d_rsum[b_idx], evProc.alpha, evProc.beta, evProc.gpu_idx); compute_gradient_bgd_cublas(handle, evProc.d_label[b_idx], evProc.d_y[b_idx], evProc.d_weight, evProc.nn, n, evProc.stepsize, evProc.alpha, evProc.beta, evProc.d_gradient[b_idx], evProc.d_dlossy[b_idx], evProc.d_dlossx[b_idx], evProc.gpu_idx); update_model_bgd_cublas(evProc.d_weight, evProc.d_gradient[b_idx], evProc.nn, evProc.stepsize, evProc.gpu_idx); double t_time = t_timer.GetTime(); evProc.ofDebug << "GPU (MGD_MLP) RunTrain time = " << t_time << endl; cublasDestroy(handle); SingleGPUMGDMLP_LoadNextDataMessage_Factory(evProc.scheduler, 0., 1); }MESSAGE_HANDLER_DEFINITION_END MESSAGE_HANDLER_DEFINITION_BEGIN(SingleGPUMGDMLPImp, newDieHandler, DieMessage) return true; }
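One thing the side-by-side pair makes easy to see is the BLAS handle lifecycle: both RunLoss and RunTrain create a fresh cuBLAS/hipBLAS handle, time the work, and destroy the handle again on every message. Handle creation is comparatively expensive, so if it ever shows up in the timed sections, the usual alternative is a long-lived handle owned by the processor, created in Init and released with the object. A minimal RAII sketch follows; it is illustrative and is not the class's actual layout.

#include <cublas_v2.h>

// Owns one cuBLAS handle for the lifetime of the object instead of paying
// cublasCreate/cublasDestroy on every message (hipblasCreate/hipblasDestroy
// play the same role on the HIP side).
struct CublasHandle {
    cublasHandle_t handle = nullptr;
    CublasHandle()  { cublasCreate(&handle); }
    ~CublasHandle() { if (handle) cublasDestroy(handle); }
    CublasHandle(const CublasHandle &) = delete;
    CublasHandle &operator=(const CublasHandle &) = delete;
};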
22c8b2952a2f84652fc19ad9d3522ee314261a8e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "flamegpu/simulation/detail/CUDAMessageList.h" #include "flamegpu/simulation/detail/CUDAMessage.h" #include "flamegpu/simulation/detail/CUDAErrorChecking.cuh" #include "flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceHost.h" #include "flamegpu/simulation/detail/CUDAScatter.cuh" #include "flamegpu/detail/cuda.cuh" namespace flamegpu { namespace detail { /** * CUDAMessageList class * @brief populates CUDA message map */ CUDAMessageList::CUDAMessageList(CUDAMessage& cuda_message, detail::CUDAScatter &scatter, hipStream_t stream, unsigned int streamId) : message(cuda_message) { // allocate message lists allocateDeviceMessageList(d_list); allocateDeviceMessageList(d_swap_list); zeroDeviceMessageList_async(d_list, stream); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(hipStreamSynchronize(stream)); } /** * A destructor. * @brief Destroys the CUDAMessageList object */ CUDAMessageList::~CUDAMessageList() { cleanupAllocatedData(); } void CUDAMessageList::cleanupAllocatedData() { // clean up releaseDeviceMessageList(d_list); releaseDeviceMessageList(d_swap_list); } void CUDAMessageList::allocateDeviceMessageList(CUDAMessageMap &memory_map) { // we use the messages memory map to iterate the message variables and do allocation within our GPU hash map const auto &mem = message.getMessageData().variables; // for each variable allocate a device array and add to map for (const auto &mm : mem) { // get the variable name std::string var_name = mm.first; // get the variable size from message description size_t var_size = mm.second.type_size * mm.second.elements; // do the device allocation void * d_ptr; #ifdef UNIFIED_GPU_MEMORY // unified memory allocation gpuErrchk(hipMallocManaged(reinterpret_cast<void**>(&d_ptr), var_size * message.getMaximumListSize())) #else // non unified memory allocation gpuErrchk(hipMalloc(reinterpret_cast<void**>(&d_ptr), var_size * message.getMaximumListSize())); #endif // store the pointer in the map memory_map.insert(CUDAMessageMap::value_type(var_name, d_ptr)); } } void CUDAMessageList::resize(CUDAScatter& scatter, hipStream_t stream, unsigned int streamId, unsigned int keep_len) { // Release d_swap_list, we don't retain this data releaseDeviceMessageList(d_swap_list); // Allocate the new d_list CUDAMessageMap d_list_old; std::swap(d_list, d_list_old); allocateDeviceMessageList(d_list); if (keep_len && keep_len <= message.getMessageCount()) { // Copy data from d_list_old to d_list // Note, if keep_len exceeds length of d_swap_list_old, this will crash scatter.scatterAll(streamId, stream, message.getMessageData().variables, d_list_old, d_list, keep_len, 0); } // Release d_list_old releaseDeviceMessageList(d_list_old); // Allocate the new d_swap_list allocateDeviceMessageList(d_swap_list); // Zero any new buffers with undefined data zeroDeviceMessageList_async(d_list, stream, keep_len); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(hipStreamSynchronize(stream)); } void CUDAMessageList::releaseDeviceMessageList(CUDAMessageMap& memory_map) { // for each device pointer in the cuda memory map we need to free these for (const auto &mm : memory_map) { // free the memory on the device gpuErrchk(flamegpu::detail::cuda::hipFree(mm.second)); } memory_map.clear(); } void CUDAMessageList::zeroDeviceMessageList_async(CUDAMessageMap& memory_map, hipStream_t stream, unsigned int skip_offset) { if (skip_offset >= 
message.getMaximumListSize()) return; // for each device pointer in the cuda memory map set the values to 0 for (const auto &mm : memory_map) { // get the variable size from message description const auto var = message.getMessageData().variables.at(mm.first); const size_t var_size = var.type_size * var.elements; // set the memory to zero gpuErrchk(hipMemsetAsync(static_cast<char*>(mm.second) + (var_size * skip_offset), 0, var_size * (message.getMaximumListSize() - skip_offset), stream)); } } void* CUDAMessageList::getReadMessageListVariablePointer(std::string variable_name) { CUDAMessageMap::iterator mm = d_list.find(variable_name); if (mm == d_list.end()) { THROW exception::InvalidMessageVar("Variable '%s' was not found in message '%s', " "in CUDAMessageList::getReadMessageListVariablePointer()", variable_name.c_str(), message.getMessageData().name.c_str()); } return mm->second; } void* CUDAMessageList::getWriteMessageListVariablePointer(std::string variable_name) { CUDAMessageMap::iterator mm = d_swap_list.find(variable_name); if (mm == d_swap_list.end()) { THROW exception::InvalidMessageVar("Variable '%s' was not found in message '%s', " "in CUDAMessageList::getWriteMessageListVariablePointer()", variable_name.c_str(), message.getMessageData().name.c_str()); } return mm->second; } void CUDAMessageList::zeroMessageData(hipStream_t stream) { zeroDeviceMessageList_async(d_list, stream); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(hipStreamSynchronize(stream)); } void CUDAMessageList::swap() { std::swap(d_list, d_swap_list); } unsigned int CUDAMessageList::scatter(unsigned int newCount, detail::CUDAScatter &scatter, hipStream_t stream, unsigned int streamId, bool append) { if (append) { unsigned int oldCount = message.getMessageCount(); return oldCount + scatter.scatter(streamId, stream, CUDAScatter::Type::MESSAGE_OUTPUT, message.getMessageData().variables, d_swap_list, d_list, newCount, oldCount); } else { return scatter.scatter(streamId, stream, CUDAScatter::Type::MESSAGE_OUTPUT, message.getMessageData().variables, d_swap_list, d_list, newCount, 0); } } unsigned int CUDAMessageList::scatterAll(unsigned int newCount, detail::CUDAScatter &scatter, hipStream_t stream, unsigned int streamId) { unsigned int oldCount = message.getMessageCount(); return oldCount + scatter.scatterAll(streamId, stream, message.getMessageData().variables, d_swap_list, d_list, newCount, oldCount); } } // namespace detail } // namespace flamegpu
22c8b2952a2f84652fc19ad9d3522ee314261a8e.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include "flamegpu/simulation/detail/CUDAMessageList.h" #include "flamegpu/simulation/detail/CUDAMessage.h" #include "flamegpu/simulation/detail/CUDAErrorChecking.cuh" #include "flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceHost.h" #include "flamegpu/simulation/detail/CUDAScatter.cuh" #include "flamegpu/detail/cuda.cuh" namespace flamegpu { namespace detail { /** * CUDAMessageList class * @brief populates CUDA message map */ CUDAMessageList::CUDAMessageList(CUDAMessage& cuda_message, detail::CUDAScatter &scatter, cudaStream_t stream, unsigned int streamId) : message(cuda_message) { // allocate message lists allocateDeviceMessageList(d_list); allocateDeviceMessageList(d_swap_list); zeroDeviceMessageList_async(d_list, stream); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(cudaStreamSynchronize(stream)); } /** * A destructor. * @brief Destroys the CUDAMessageList object */ CUDAMessageList::~CUDAMessageList() { cleanupAllocatedData(); } void CUDAMessageList::cleanupAllocatedData() { // clean up releaseDeviceMessageList(d_list); releaseDeviceMessageList(d_swap_list); } void CUDAMessageList::allocateDeviceMessageList(CUDAMessageMap &memory_map) { // we use the messages memory map to iterate the message variables and do allocation within our GPU hash map const auto &mem = message.getMessageData().variables; // for each variable allocate a device array and add to map for (const auto &mm : mem) { // get the variable name std::string var_name = mm.first; // get the variable size from message description size_t var_size = mm.second.type_size * mm.second.elements; // do the device allocation void * d_ptr; #ifdef UNIFIED_GPU_MEMORY // unified memory allocation gpuErrchk(cudaMallocManaged(reinterpret_cast<void**>(&d_ptr), var_size * message.getMaximumListSize())) #else // non unified memory allocation gpuErrchk(cudaMalloc(reinterpret_cast<void**>(&d_ptr), var_size * message.getMaximumListSize())); #endif // store the pointer in the map memory_map.insert(CUDAMessageMap::value_type(var_name, d_ptr)); } } void CUDAMessageList::resize(CUDAScatter& scatter, cudaStream_t stream, unsigned int streamId, unsigned int keep_len) { // Release d_swap_list, we don't retain this data releaseDeviceMessageList(d_swap_list); // Allocate the new d_list CUDAMessageMap d_list_old; std::swap(d_list, d_list_old); allocateDeviceMessageList(d_list); if (keep_len && keep_len <= message.getMessageCount()) { // Copy data from d_list_old to d_list // Note, if keep_len exceeds length of d_swap_list_old, this will crash scatter.scatterAll(streamId, stream, message.getMessageData().variables, d_list_old, d_list, keep_len, 0); } // Release d_list_old releaseDeviceMessageList(d_list_old); // Allocate the new d_swap_list allocateDeviceMessageList(d_swap_list); // Zero any new buffers with undefined data zeroDeviceMessageList_async(d_list, stream, keep_len); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(cudaStreamSynchronize(stream)); } void CUDAMessageList::releaseDeviceMessageList(CUDAMessageMap& memory_map) { // for each device pointer in the cuda memory map we need to free these for (const auto &mm : memory_map) { // free the memory on the device gpuErrchk(flamegpu::detail::cuda::cudaFree(mm.second)); } memory_map.clear(); } void CUDAMessageList::zeroDeviceMessageList_async(CUDAMessageMap& memory_map, cudaStream_t stream, unsigned int skip_offset) { if (skip_offset >= message.getMaximumListSize()) return; // for each device 
pointer in the cuda memory map set the values to 0 for (const auto &mm : memory_map) { // get the variable size from message description const auto var = message.getMessageData().variables.at(mm.first); const size_t var_size = var.type_size * var.elements; // set the memory to zero gpuErrchk(cudaMemsetAsync(static_cast<char*>(mm.second) + (var_size * skip_offset), 0, var_size * (message.getMaximumListSize() - skip_offset), stream)); } } void* CUDAMessageList::getReadMessageListVariablePointer(std::string variable_name) { CUDAMessageMap::iterator mm = d_list.find(variable_name); if (mm == d_list.end()) { THROW exception::InvalidMessageVar("Variable '%s' was not found in message '%s', " "in CUDAMessageList::getReadMessageListVariablePointer()", variable_name.c_str(), message.getMessageData().name.c_str()); } return mm->second; } void* CUDAMessageList::getWriteMessageListVariablePointer(std::string variable_name) { CUDAMessageMap::iterator mm = d_swap_list.find(variable_name); if (mm == d_swap_list.end()) { THROW exception::InvalidMessageVar("Variable '%s' was not found in message '%s', " "in CUDAMessageList::getWriteMessageListVariablePointer()", variable_name.c_str(), message.getMessageData().name.c_str()); } return mm->second; } void CUDAMessageList::zeroMessageData(cudaStream_t stream) { zeroDeviceMessageList_async(d_list, stream); zeroDeviceMessageList_async(d_swap_list, stream); gpuErrchk(cudaStreamSynchronize(stream)); } void CUDAMessageList::swap() { std::swap(d_list, d_swap_list); } unsigned int CUDAMessageList::scatter(unsigned int newCount, detail::CUDAScatter &scatter, cudaStream_t stream, unsigned int streamId, bool append) { if (append) { unsigned int oldCount = message.getMessageCount(); return oldCount + scatter.scatter(streamId, stream, CUDAScatter::Type::MESSAGE_OUTPUT, message.getMessageData().variables, d_swap_list, d_list, newCount, oldCount); } else { return scatter.scatter(streamId, stream, CUDAScatter::Type::MESSAGE_OUTPUT, message.getMessageData().variables, d_swap_list, d_list, newCount, 0); } } unsigned int CUDAMessageList::scatterAll(unsigned int newCount, detail::CUDAScatter &scatter, cudaStream_t stream, unsigned int streamId) { unsigned int oldCount = message.getMessageCount(); return oldCount + scatter.scatterAll(streamId, stream, message.getMessageData().variables, d_swap_list, d_list, newCount, oldCount); } } // namespace detail } // namespace flamegpu
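The message list above is a device-side double buffer: kernels read from d_list, new messages are written into d_swap_list, and swap() exchanges the two maps once scatter has compacted the output. Stripped of the flamegpu variable maps and error-checking macros, the idea reduces to the standalone sketch below (a single float field, illustrative only).

#include <cuda_runtime.h>
#include <cstddef>
#include <utility>

// Read/write double buffer: consumers read from read, producers write to
// write, and swap() flips the roles once a pass is complete.
struct DoubleBuffer {
    float *read = nullptr, *write = nullptr;
    explicit DoubleBuffer(size_t n) {
        cudaMalloc(&read,  n * sizeof(float));
        cudaMalloc(&write, n * sizeof(float));
        cudaMemset(read,  0, n * sizeof(float));
        cudaMemset(write, 0, n * sizeof(float));
    }
    ~DoubleBuffer() { cudaFree(read); cudaFree(write); }
    DoubleBuffer(const DoubleBuffer &) = delete;
    DoubleBuffer &operator=(const DoubleBuffer &) = delete;
    void swap() { std::swap(read, write); }
};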